+ * When set to a non-null value, the client uses the supplied socket factory, and the configuration options
+ * TRUST_ALL_CERTIFICATES, TLS_TRUST_MANAGERS_PROVIDER, and TLS_KEY_MANAGERS_PROVIDER are ignored.
+ */
+ Builder socketFactory(ConnectionSocketFactory socketFactory);
+
/**
* Configuration that defines an HTTP route planner that computes the route an HTTP request should take.
* May not be used in conjunction with {@link #proxyConfiguration(ProxyConfiguration)}.
@@ -452,6 +460,7 @@ private static final class DefaultBuilder implements Builder {
private HttpRoutePlanner httpRoutePlanner;
private CredentialsProvider credentialsProvider;
private DnsResolver dnsResolver;
+ private ConnectionSocketFactory socketFactory;
private DefaultBuilder() {
}
@@ -572,6 +581,16 @@ public void setDnsResolver(DnsResolver dnsResolver) {
dnsResolver(dnsResolver);
}
+ @Override
+ public Builder socketFactory(ConnectionSocketFactory socketFactory) {
+ this.socketFactory = socketFactory;
+ return this;
+ }
+
+ public void setSocketFactory(ConnectionSocketFactory socketFactory) {
+ socketFactory(socketFactory);
+ }
+
@Override
public Builder httpRoutePlanner(HttpRoutePlanner httpRoutePlanner) {
this.httpRoutePlanner = httpRoutePlanner;
@@ -654,9 +673,9 @@ public HttpClientConnectionManager create(ApacheHttpClient.DefaultBuilder config
private ConnectionSocketFactory getPreferredSocketFactory(ApacheHttpClient.DefaultBuilder configuration,
AttributeMap standardOptions) {
- // TODO v2 custom socket factory
- return new SdkTlsSocketFactory(getSslContext(standardOptions),
- getHostNameVerifier(standardOptions));
+ return Optional.ofNullable(configuration.socketFactory)
+ .orElseGet(() -> new SdkTlsSocketFactory(getSslContext(standardOptions),
+ getHostNameVerifier(standardOptions)));
}
private HostnameVerifier getHostNameVerifier(AttributeMap standardOptions) {
diff --git a/http-clients/apache-client/src/test/java/software/amazon/awssdk/http/apache/ApacheClientTlsAuthTest.java b/http-clients/apache-client/src/test/java/software/amazon/awssdk/http/apache/ApacheClientTlsAuthTest.java
index e3f5e3259c85..113de34d8ca5 100644
--- a/http-clients/apache-client/src/test/java/software/amazon/awssdk/http/apache/ApacheClientTlsAuthTest.java
+++ b/http-clients/apache-client/src/test/java/software/amazon/awssdk/http/apache/ApacheClientTlsAuthTest.java
@@ -30,8 +30,16 @@
import java.io.IOException;
import java.net.SocketException;
import java.net.URI;
+import java.security.KeyManagementException;
+import java.security.NoSuchAlgorithmException;
+import javax.net.ssl.KeyManager;
+import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLException;
+import javax.net.ssl.TrustManager;
import org.apache.http.NoHttpResponseException;
+import org.apache.http.conn.socket.ConnectionSocketFactory;
+import org.apache.http.conn.ssl.NoopHostnameVerifier;
+import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
@@ -39,6 +47,7 @@
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
+import org.mockito.Mockito;
import software.amazon.awssdk.http.FileStoreTlsKeyManagersProvider;
import software.amazon.awssdk.http.HttpExecuteRequest;
import software.amazon.awssdk.http.HttpExecuteResponse;
@@ -47,6 +56,7 @@
import software.amazon.awssdk.http.SdkHttpMethod;
import software.amazon.awssdk.http.SdkHttpRequest;
import software.amazon.awssdk.http.TlsKeyManagersProvider;
+import software.amazon.awssdk.http.apache.internal.conn.SdkTlsSocketFactory;
import software.amazon.awssdk.internal.http.NoneTlsKeyManagersProvider;
/**
@@ -170,6 +180,62 @@ public void defaultTlsKeyManagersProviderIsSystemPropertyProvider_explicitlySetT
}
}
+ @Test
+ public void build_notSettingSocketFactory_configuresClientWithDefaultSocketFactory() throws IOException,
+ NoSuchAlgorithmException,
+ KeyManagementException {
+ System.setProperty(SSL_KEY_STORE.property(), clientKeyStore.toAbsolutePath().toString());
+ System.setProperty(SSL_KEY_STORE_TYPE.property(), CLIENT_STORE_TYPE);
+ System.setProperty(SSL_KEY_STORE_PASSWORD.property(), STORE_PASSWORD);
+
+ TlsKeyManagersProvider provider = FileStoreTlsKeyManagersProvider.create(clientKeyStore,
+ CLIENT_STORE_TYPE,
+ STORE_PASSWORD);
+ KeyManager[] keyManagers = provider.keyManagers();
+
+ SSLContext sslcontext = SSLContext.getInstance("TLS");
+ sslcontext.init(keyManagers, null, null);
+
+ ConnectionSocketFactory socketFactory = new SdkTlsSocketFactory(sslcontext, NoopHostnameVerifier.INSTANCE);
+ ConnectionSocketFactory socketFactoryMock = Mockito.spy(socketFactory);
+
+ client = ApacheHttpClient.builder().build();
+
+ try {
+ HttpExecuteResponse httpExecuteResponse = makeRequestWithHttpClient(client);
+ assertThat(httpExecuteResponse.httpResponse().statusCode()).isEqualTo(200);
+ } finally {
+ System.clearProperty(SSL_KEY_STORE.property());
+ System.clearProperty(SSL_KEY_STORE_TYPE.property());
+ System.clearProperty(SSL_KEY_STORE_PASSWORD.property());
+ }
+
+ Mockito.verifyNoInteractions(socketFactoryMock);
+ }
+
+ @Test
+ public void build_settingCustomSocketFactory_configuresClientWithGivenSocketFactory() throws IOException,
+ NoSuchAlgorithmException,
+ KeyManagementException {
+ TlsKeyManagersProvider provider = FileStoreTlsKeyManagersProvider.create(clientKeyStore,
+ CLIENT_STORE_TYPE,
+ STORE_PASSWORD);
+ KeyManager[] keyManagers = provider.keyManagers();
+
+ SSLContext sslcontext = SSLContext.getInstance("TLS");
+ sslcontext.init(keyManagers, null, null);
+
+ ConnectionSocketFactory socketFactory = new SdkTlsSocketFactory(sslcontext, NoopHostnameVerifier.INSTANCE);
+ ConnectionSocketFactory socketFactoryMock = Mockito.spy(socketFactory);
+
+ client = ApacheHttpClient.builder()
+ .socketFactory(socketFactoryMock)
+ .build();
+ makeRequestWithHttpClient(client);
+
+ Mockito.verify(socketFactoryMock).createSocket(Mockito.any());
+ }
+
private HttpExecuteResponse makeRequestWithHttpClient(SdkHttpClient httpClient) throws IOException {
SdkHttpRequest httpRequest = SdkHttpFullRequest.builder()
.method(SdkHttpMethod.GET)
From 7e1bb486b32f442ca650f984b016e36c8e84445e Mon Sep 17 00:00:00 2001
From: Matthew Miller Update a model training job to request a new Debugger profiling configuration. Update a model training job to request a new Debugger profiling configuration or to change warm pool retention length. The environment variables to set in the Docker container. The status of the warm pool associated with the training job. The JupyterServer app settings. The sort order for results. The default is A filter that retrieves only training jobs with a specific warm pool status. The configuration of a heterogeneous cluster in JSON format. The duration of time in seconds to retain configured resources in a warm pool for subsequent training jobs. Describes the resources, including ML compute instances and ML storage volumes, to use for model training. The The The status of the training job. The status of the warm pool associated with the training job. Provides summary information about a training job. Configuration information for Debugger rules for profiling system and framework metrics. The training job The status of the warm pool. The billable time in seconds used by the warm pool. Billable time refers to the absolute wall-clock time. Multiply The name of the matching training job that reused the warm pool. Status and billing information about the warm pool. Turns off automatic rotation, and if a rotation is currently in progress, cancels the rotation. If you cancel a rotation in progress, it can leave the To turn on automatic rotation again, call RotateSecret. Required permissions: Turns off automatic rotation, and if a rotation is currently in progress, cancels the rotation. If you cancel a rotation in progress, it can leave the To turn on automatic rotation again, call RotateSecret. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: Creates a new secret. A secret can be a password, a set of credentials such as a user name and password, an OAuth token, or other secret information that you store in an encrypted form in Secrets Manager. The secret also includes the connection information to access a database or other service, which Secrets Manager doesn't encrypt. A secret in Secrets Manager consists of both the protected secret data and the important information needed to manage the secret. For information about creating a secret in the console, see Create a secret. To create a secret, you can provide the secret value to be encrypted in either the For database credentials you want to rotate, for Secrets Manager to be able to rotate the secret, you must make sure the JSON you store in the If you don't specify an KMS encryption key, Secrets Manager uses the Amazon Web Services managed key If the secret is in a different Amazon Web Services account from the credentials calling the API, then you can't use Required permissions: To encrypt the secret with a KMS key other than Creates a new secret. A secret can be a password, a set of credentials such as a user name and password, an OAuth token, or other secret information that you store in an encrypted form in Secrets Manager. The secret also includes the connection information to access a database or other service, which Secrets Manager doesn't encrypt. A secret in Secrets Manager consists of both the protected secret data and the important information needed to manage the secret. 
For information about creating a secret in the console, see Create a secret. To create a secret, you can provide the secret value to be encrypted in either the For database credentials you want to rotate, for Secrets Manager to be able to rotate the secret, you must make sure the JSON you store in the If you don't specify an KMS encryption key, Secrets Manager uses the Amazon Web Services managed key If the secret is in a different Amazon Web Services account from the credentials calling the API, then you can't use Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except Required permissions: To encrypt the secret with a KMS key other than Deletes the resource-based permission policy attached to the secret. To attach a policy to a secret, use PutResourcePolicy. Required permissions: Deletes the resource-based permission policy attached to the secret. To attach a policy to a secret, use PutResourcePolicy. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: Deletes a secret and all of its versions. You can specify a recovery window during which you can restore the secret. The minimum recovery window is 7 days. The default recovery window is 30 days. Secrets Manager attaches a You can't delete a primary secret that is replicated to other Regions. You must first delete the replicas using RemoveRegionsFromReplication, and then delete the primary secret. When you delete a replica, it is deleted immediately. You can't directly delete a version of a secret. Instead, you remove all staging labels from the version using UpdateSecretVersionStage. This marks the version as deprecated, and then Secrets Manager can automatically delete the version in the background. To determine whether an application still uses a secret, you can create an Amazon CloudWatch alarm to alert you to any attempts to access a secret during the recovery window. For more information, see Monitor secrets scheduled for deletion. Secrets Manager performs the permanent secret deletion at the end of the waiting period as a background task with low priority. There is no guarantee of a specific time after the recovery window for the permanent delete to occur. At any time before recovery window ends, you can use RestoreSecret to remove the When a secret is scheduled for deletion, you cannot retrieve the secret value. You must first cancel the deletion with RestoreSecret and then you can retrieve the secret. Required permissions: Deletes a secret and all of its versions. You can specify a recovery window during which you can restore the secret. The minimum recovery window is 7 days. The default recovery window is 30 days. Secrets Manager attaches a You can't delete a primary secret that is replicated to other Regions. You must first delete the replicas using RemoveRegionsFromReplication, and then delete the primary secret. When you delete a replica, it is deleted immediately. You can't directly delete a version of a secret. Instead, you remove all staging labels from the version using UpdateSecretVersionStage. This marks the version as deprecated, and then Secrets Manager can automatically delete the version in the background. 
To determine whether an application still uses a secret, you can create an Amazon CloudWatch alarm to alert you to any attempts to access a secret during the recovery window. For more information, see Monitor secrets scheduled for deletion. Secrets Manager performs the permanent secret deletion at the end of the waiting period as a background task with low priority. There is no guarantee of a specific time after the recovery window for the permanent delete to occur. At any time before recovery window ends, you can use RestoreSecret to remove the When a secret is scheduled for deletion, you cannot retrieve the secret value. You must first cancel the deletion with RestoreSecret and then you can retrieve the secret. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: Retrieves the details of a secret. It does not include the encrypted secret value. Secrets Manager only returns fields that have a value in the response. Required permissions: Retrieves the details of a secret. It does not include the encrypted secret value. Secrets Manager only returns fields that have a value in the response. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: Generates a random password. We recommend that you specify the maximum length and include every character type that the system you are generating a password for can support. Required permissions: Generates a random password. We recommend that you specify the maximum length and include every character type that the system you are generating a password for can support. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: Retrieves the JSON text of the resource-based policy document attached to the secret. For more information about permissions policies attached to a secret, see Permissions policies attached to a secret. Required permissions: Retrieves the JSON text of the resource-based policy document attached to the secret. For more information about permissions policies attached to a secret, see Permissions policies attached to a secret. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: Retrieves the contents of the encrypted fields We recommend that you cache your secret values by using client-side caching. Caching secrets improves speed and reduces your costs. For more information, see Cache secrets for your applications. To retrieve the previous version of a secret, use Required permissions: Retrieves the contents of the encrypted fields We recommend that you cache your secret values by using client-side caching. Caching secrets improves speed and reduces your costs. For more information, see Cache secrets for your applications. 
To retrieve the previous version of a secret, use Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: Lists the versions of a secret. Secrets Manager uses staging labels to indicate the different versions of a secret. For more information, see Secrets Manager concepts: Versions. To list the secrets in the account, use ListSecrets. Required permissions: Lists the versions of a secret. Secrets Manager uses staging labels to indicate the different versions of a secret. For more information, see Secrets Manager concepts: Versions. To list the secrets in the account, use ListSecrets. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: Lists the secrets that are stored by Secrets Manager in the Amazon Web Services account, not including secrets that are marked for deletion. To see secrets marked for deletion, use the Secrets Manager console. ListSecrets is eventually consistent, however it might not reflect changes from the last five minutes. To get the latest information for a specific secret, use DescribeSecret. To list the versions of a secret, use ListSecretVersionIds. To get the secret value from For information about finding secrets in the console, see Find secrets in Secrets Manager. Required permissions: Lists the secrets that are stored by Secrets Manager in the Amazon Web Services account, not including secrets that are marked for deletion. To see secrets marked for deletion, use the Secrets Manager console. ListSecrets is eventually consistent, however it might not reflect changes from the last five minutes. To get the latest information for a specific secret, use DescribeSecret. To list the versions of a secret, use ListSecretVersionIds. To get the secret value from For information about finding secrets in the console, see Find secrets in Secrets Manager. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: Attaches a resource-based permission policy to a secret. A resource-based policy is optional. For more information, see Authentication and access control for Secrets Manager For information about attaching a policy in the console, see Attach a permissions policy to a secret. Required permissions: Attaches a resource-based permission policy to a secret. A resource-based policy is optional. For more information, see Authentication and access control for Secrets Manager For information about attaching a policy in the console, see Attach a permissions policy to a secret. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: Creates a new version with a new encrypted secret value and attaches it to the secret. 
The version can contain a new We recommend you avoid calling You can specify the staging labels to attach to the new version in If this operation moves the staging label This operation is idempotent. If you call this operation with a Required permissions: Creates a new version with a new encrypted secret value and attaches it to the secret. The version can contain a new We recommend you avoid calling You can specify the staging labels to attach to the new version in This operation is idempotent. If you call this operation with a Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except Required permissions: For a secret that is replicated to other Regions, deletes the secret replicas from the Regions you specify. Required permissions: For a secret that is replicated to other Regions, deletes the secret replicas from the Regions you specify. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: Replicates the secret to a new Regions. See Multi-Region secrets. Required permissions: Replicates the secret to a new Regions. See Multi-Region secrets. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: Cancels the scheduled deletion of a secret by removing the Required permissions: Cancels the scheduled deletion of a secret by removing the Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: Configures and starts the asynchronous process of rotating the secret. For more information about rotation, see Rotate secrets. If you include the configuration parameters, the operation sets the values for the secret and then immediately starts a rotation. If you don't include the configuration parameters, the operation starts a rotation with the values already stored in the secret. For database credentials you want to rotate, for Secrets Manager to be able to rotate the secret, you must make sure the secret value is in the JSON structure of a database secret. In particular, if you want to use the alternating users strategy, your secret must contain the ARN of a superuser secret. To configure rotation, you also need the ARN of an Amazon Web Services Lambda function and the schedule for the rotation. The Lambda rotation function creates a new version of the secret and creates or updates the credentials on the database or service to match. After testing the new credentials, the function marks the new secret version with the staging label You can create the Lambda rotation function based on the rotation function templates that Secrets Manager provides. Choose a template that matches your Rotation strategy. When rotation is successful, the Required permissions: Configures and starts the asynchronous process of rotating the secret. For more information about rotation, see Rotate secrets. If you include the configuration parameters, the operation sets the values for the secret and then immediately starts a rotation. 
If you don't include the configuration parameters, the operation starts a rotation with the values already stored in the secret. For database credentials you want to rotate, for Secrets Manager to be able to rotate the secret, you must make sure the secret value is in the JSON structure of a database secret. In particular, if you want to use the alternating users strategy, your secret must contain the ARN of a superuser secret. To configure rotation, you also need the ARN of an Amazon Web Services Lambda function and the schedule for the rotation. The Lambda rotation function creates a new version of the secret and creates or updates the credentials on the database or service to match. After testing the new credentials, the function marks the new secret version with the staging label You can create the Lambda rotation function based on the rotation function templates that Secrets Manager provides. Choose a template that matches your Rotation strategy. When rotation is successful, the Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: Removes the link between the replica secret and the primary secret and promotes the replica to a primary secret in the replica Region. You must call this operation from the Region in which you want to promote the replica to a primary secret. Required permissions: Removes the link between the replica secret and the primary secret and promotes the replica to a primary secret in the replica Region. You must call this operation from the Region in which you want to promote the replica to a primary secret. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: Attaches tags to a secret. Tags consist of a key name and a value. Tags are part of the secret's metadata. They are not associated with specific versions of the secret. This operation appends tags to the existing list of tags. The following restrictions apply to tags: Maximum number of tags per secret: 50 Maximum key length: 127 Unicode characters in UTF-8 Maximum value length: 255 Unicode characters in UTF-8 Tag keys and values are case sensitive. Do not use the If you use your tagging schema across multiple services and resources, other services might have restrictions on allowed characters. Generally allowed characters: letters, spaces, and numbers representable in UTF-8, plus the following special characters: + - = . _ : / @. If you use tags as part of your security strategy, then adding or removing a tag can change permissions. If successfully completing this operation would result in you losing your permissions for this secret, then the operation is blocked and returns an Access Denied error. Required permissions: Attaches tags to a secret. Tags consist of a key name and a value. Tags are part of the secret's metadata. They are not associated with specific versions of the secret. This operation appends tags to the existing list of tags. The following restrictions apply to tags: Maximum number of tags per secret: 50 Maximum key length: 127 Unicode characters in UTF-8 Maximum value length: 255 Unicode characters in UTF-8 Tag keys and values are case sensitive. 
Do not use the If you use your tagging schema across multiple services and resources, other services might have restrictions on allowed characters. Generally allowed characters: letters, spaces, and numbers representable in UTF-8, plus the following special characters: + - = . _ : / @. If you use tags as part of your security strategy, then adding or removing a tag can change permissions. If successfully completing this operation would result in you losing your permissions for this secret, then the operation is blocked and returns an Access Denied error. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: Removes specific tags from a secret. This operation is idempotent. If a requested tag is not attached to the secret, no error is returned and the secret metadata is unchanged. If you use tags as part of your security strategy, then removing a tag can change permissions. If successfully completing this operation would result in you losing your permissions for this secret, then the operation is blocked and returns an Access Denied error. Required permissions: Removes specific tags from a secret. This operation is idempotent. If a requested tag is not attached to the secret, no error is returned and the secret metadata is unchanged. If you use tags as part of your security strategy, then removing a tag can change permissions. If successfully completing this operation would result in you losing your permissions for this secret, then the operation is blocked and returns an Access Denied error. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: Modifies the details of a secret, including metadata and the secret value. To change the secret value, you can also use PutSecretValue. To change the rotation configuration of a secret, use RotateSecret instead. We recommend you avoid calling If you include If you call this operation with a Required permissions: Modifies the details of a secret, including metadata and the secret value. To change the secret value, you can also use PutSecretValue. To change the rotation configuration of a secret, use RotateSecret instead. We recommend you avoid calling If you include If you call this operation with a Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except Required permissions: Modifies the staging labels attached to a version of a secret. Secrets Manager uses staging labels to track a version as it progresses through the secret rotation process. Each staging label can be attached to only one version at a time. To add a staging label to a version when it is already attached to another version, Secrets Manager first removes it from the other version first and then attaches it to this one. For more information about versions and staging labels, see Concepts: Version. The staging labels that you specify in the You can move the Whenever you move If this action results in the last label being removed from a version, then the version is considered to be 'deprecated' and can be deleted by Secrets Manager. 
Required permissions: Modifies the staging labels attached to a version of a secret. Secrets Manager uses staging labels to track a version as it progresses through the secret rotation process. Each staging label can be attached to only one version at a time. To add a staging label to a version when it is already attached to another version, Secrets Manager first removes it from the other version first and then attaches it to this one. For more information about versions and staging labels, see Concepts: Version. The staging labels that you specify in the You can move the Whenever you move If this action results in the last label being removed from a version, then the version is considered to be 'deprecated' and can be deleted by Secrets Manager. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: Validates that a resource policy does not grant a wide range of principals access to your secret. A resource-based policy is optional for secrets. The API performs three checks when validating the policy: Sends a call to Zelkova, an automated reasoning engine, to ensure your resource policy does not allow broad access to your secret, for example policies that use a wildcard for the principal. Checks for correct syntax in a policy. Verifies the policy does not lock out a caller. Required permissions: Validates that a resource policy does not grant a wide range of principals access to your secret. A resource-based policy is optional for secrets. The API performs three checks when validating the policy: Sends a call to Zelkova, an automated reasoning engine, to ensure your resource policy does not allow broad access to your secret, for example policies that use a wildcard for the principal. Checks for correct syntax in a policy. Verifies the policy does not lock out a caller. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: In a management account, an environment account connection request is accepted. When the environment account connection request is accepted, Proton can use the associated IAM role to provision environment infrastructure resources in the associated environment account. For more information, see Environment account connections in the Proton Administrator guide. In a management account, an environment account connection request is accepted. When the environment account connection request is accepted, Proton can use the associated IAM role to provision environment infrastructure resources in the associated environment account. For more information, see Environment account connections in the Proton User guide. Attempts to cancel a component deployment (for a component that is in the For more information about components, see Proton components in the Proton Administrator Guide. Attempts to cancel a component deployment (for a component that is in the For more information about components, see Proton components in the Proton User Guide. Attempts to cancel an environment deployment on an UpdateEnvironment action, if the deployment is The following list includes potential cancellation scenarios. 
If the cancellation attempt succeeds, the resulting deployment state is If the cancellation attempt fails, the resulting deployment state is If the current UpdateEnvironment action succeeds before the cancellation attempt starts, the resulting deployment state is Attempts to cancel an environment deployment on an UpdateEnvironment action, if the deployment is The following list includes potential cancellation scenarios. If the cancellation attempt succeeds, the resulting deployment state is If the cancellation attempt fails, the resulting deployment state is If the current UpdateEnvironment action succeeds before the cancellation attempt starts, the resulting deployment state is Attempts to cancel a service instance deployment on an UpdateServiceInstance action, if the deployment is The following list includes potential cancellation scenarios. If the cancellation attempt succeeds, the resulting deployment state is If the cancellation attempt fails, the resulting deployment state is If the current UpdateServiceInstance action succeeds before the cancellation attempt starts, the resulting deployment state is Attempts to cancel a service instance deployment on an UpdateServiceInstance action, if the deployment is The following list includes potential cancellation scenarios. If the cancellation attempt succeeds, the resulting deployment state is If the cancellation attempt fails, the resulting deployment state is If the current UpdateServiceInstance action succeeds before the cancellation attempt starts, the resulting deployment state is Attempts to cancel a service pipeline deployment on an UpdateServicePipeline action, if the deployment is The following list includes potential cancellation scenarios. If the cancellation attempt succeeds, the resulting deployment state is If the cancellation attempt fails, the resulting deployment state is If the current UpdateServicePipeline action succeeds before the cancellation attempt starts, the resulting deployment state is Attempts to cancel a service pipeline deployment on an UpdateServicePipeline action, if the deployment is The following list includes potential cancellation scenarios. If the cancellation attempt succeeds, the resulting deployment state is If the cancellation attempt fails, the resulting deployment state is If the current UpdateServicePipeline action succeeds before the cancellation attempt starts, the resulting deployment state is Create an Proton component. A component is an infrastructure extension for a service instance. For more information about components, see Proton components in the Proton Administrator Guide. Create an Proton component. A component is an infrastructure extension for a service instance. For more information about components, see Proton components in the Proton User Guide. Deploy a new environment. An Proton environment is created from an environment template that defines infrastructure and resources that can be shared across services. You can provision environments using the following methods: Amazon Web Services-managed provisioning: Proton makes direct calls to provision your resources. Self-managed provisioning: Proton makes pull requests on your repository to provide compiled infrastructure as code (IaC) files that your IaC engine uses to provision resources. For more information, see Environments and Provisioning methods in the Proton Administrator Guide. Deploy a new environment. 
An Proton environment is created from an environment template that defines infrastructure and resources that can be shared across services. You can provision environments using the following methods: Amazon Web Services-managed provisioning: Proton makes direct calls to provision your resources. Self-managed provisioning: Proton makes pull requests on your repository to provide compiled infrastructure as code (IaC) files that your IaC engine uses to provision resources. For more information, see Environments and Provisioning methods in the Proton User Guide. Create an environment account connection in an environment account so that environment infrastructure resources can be provisioned in the environment account from a management account. An environment account connection is a secure bi-directional connection between a management account and an environment account that maintains authorization and permissions. For more information, see Environment account connections in the Proton Administrator guide. Create an environment account connection in an environment account so that environment infrastructure resources can be provisioned in the environment account from a management account. An environment account connection is a secure bi-directional connection between a management account and an environment account that maintains authorization and permissions. For more information, see Environment account connections in the Proton User guide. Create an environment template for Proton. For more information, see Environment Templates in the Proton Administrator Guide. You can create an environment template in one of the two following ways: Register and publish a standard environment template that instructs Proton to deploy and manage environment infrastructure. Register and publish a customer managed environment template that connects Proton to your existing provisioned infrastructure that you manage. Proton doesn't manage your existing provisioned infrastructure. To create an environment template for customer provisioned and managed infrastructure, include the Create an environment template for Proton. For more information, see Environment Templates in the Proton User Guide. You can create an environment template in one of the two following ways: Register and publish a standard environment template that instructs Proton to deploy and manage environment infrastructure. Register and publish a customer managed environment template that connects Proton to your existing provisioned infrastructure that you manage. Proton doesn't manage your existing provisioned infrastructure. To create an environment template for customer provisioned and managed infrastructure, include the Create and register a link to a repository that can be used with self-managed provisioning (infrastructure or pipelines) or for template sync configurations. When you create a repository link, Proton creates a service-linked role for you. For more information, see Self-managed provisioning, Template bundles, and Template sync configurations in the Proton Administrator Guide. Create and register a link to a repository. Proton uses the link to repeatedly access the repository, to either push to it (self-managed provisioning) or pull from it (template sync). You can share a linked repository across multiple resources (like environments using self-managed provisioning, or synced templates). When you create a repository link, Proton creates a service-linked role for you. 
For more information, see Self-managed provisioning, Template bundles, and Template sync configurations in the Proton User Guide. Create an Proton service. An Proton service is an instantiation of a service template and often includes several service instances and pipeline. For more information, see Services in the Proton Administrator Guide and Services in the Proton User Guide. Create an Proton service. An Proton service is an instantiation of a service template and often includes several service instances and pipeline. For more information, see Services in the Proton User Guide. Create a service template. The administrator creates a service template to define standardized infrastructure and an optional CI/CD service pipeline. Developers, in turn, select the service template from Proton. If the selected service template includes a service pipeline definition, they provide a link to their source code repository. Proton then deploys and manages the infrastructure defined by the selected service template. For more information, see Service Templates in the Proton Administrator Guide. Create a service template. The administrator creates a service template to define standardized infrastructure and an optional CI/CD service pipeline. Developers, in turn, select the service template from Proton. If the selected service template includes a service pipeline definition, they provide a link to their source code repository. Proton then deploys and manages the infrastructure defined by the selected service template. For more information, see Proton templates in the Proton User Guide. Set up a template to create new template versions automatically. When a commit is pushed to your registered repository, Proton checks for changes to your repository template bundles. If it detects a template bundle change, a new major or minor version of its template is created, if the version doesn’t already exist. For more information, see Template sync configurations in the Proton Administrator Guide. Set up a template to create new template versions automatically by tracking a linked repository. A linked repository is a repository that has been registered with Proton. For more information, see CreateRepository. When a commit is pushed to your linked repository, Proton checks for changes to your repository template bundles. If it detects a template bundle change, a new major or minor version of its template is created, if the version doesn’t already exist. For more information, see Template sync configurations in the Proton User Guide. Delete an Proton component resource. For more information about components, see Proton components in the Proton Administrator Guide. Delete an Proton component resource. For more information about components, see Proton components in the Proton User Guide. In an environment account, delete an environment account connection. After you delete an environment account connection that’s in use by an Proton environment, Proton can’t manage the environment infrastructure resources until a new environment account connection is accepted for the environment account and associated environment. You're responsible for cleaning up provisioned resources that remain without an environment connection. For more information, see Environment account connections in the Proton Administrator guide. In an environment account, delete an environment account connection. 
After you delete an environment account connection that’s in use by an Proton environment, Proton can’t manage the environment infrastructure resources until a new environment account connection is accepted for the environment account and associated environment. You're responsible for cleaning up provisioned resources that remain without an environment connection. For more information, see Environment account connections in the Proton User guide. Delete a service, with its instances and pipeline. You can't delete a service if it has any service instances that have components attached to them. For more information about components, see Proton components in the Proton Administrator Guide. Delete a service, with its instances and pipeline. You can't delete a service if it has any service instances that have components attached to them. For more information about components, see Proton components in the Proton User Guide. Get detail data for the Proton pipeline service role. Get detail data for Proton account-wide settings. Get detailed data for a component. For more information about components, see Proton components in the Proton Administrator Guide. Get detailed data for a component. For more information about components, see Proton components in the Proton User Guide. In an environment account, get the detailed data for an environment account connection. For more information, see Environment account connections in the Proton Administrator guide. In an environment account, get the detailed data for an environment account connection. For more information, see Environment account connections in the Proton User guide. Get detail data for a repository. Get detail data for a linked repository. Get the sync status of a repository used for Proton template sync. For more information about template sync, see . A repository sync status isn't tied to the Proton Repository resource (or any other Proton resource). Therefore, tags on an Proton Repository resource have no effect on this action. Specifically, you can't use these tags to control access to this action using Attribute-based access control (ABAC). For more information about ABAC, see ABAC in the Proton Administrator Guide. Get the sync status of a repository used for Proton template sync. For more information about template sync, see . A repository sync status isn't tied to the Proton Repository resource (or any other Proton resource). Therefore, tags on an Proton Repository resource have no effect on this action. Specifically, you can't use these tags to control access to this action using Attribute-based access control (ABAC). For more information about ABAC, see ABAC in the Proton User Guide. Get a list of component Infrastructure as Code (IaC) outputs. For more information about components, see Proton components in the Proton Administrator Guide. Get a list of component Infrastructure as Code (IaC) outputs. For more information about components, see Proton components in the Proton User Guide. List provisioned resources for a component with details. For more information about components, see Proton components in the Proton Administrator Guide. List provisioned resources for a component with details. For more information about components, see Proton components in the Proton User Guide. List components with summary data. You can filter the result list by environment, service, or a single service instance. For more information about components, see Proton components in the Proton Administrator Guide. List components with summary data. 
You can filter the result list by environment, service, or a single service instance. For more information about components, see Proton components in the Proton User Guide. View a list of environment account connections. For more information, see Environment account connections in the Proton Administrator guide. View a list of environment account connections. For more information, see Environment account connections in the Proton User guide. List repositories with detail data. List linked repositories with detail data. List tags for a resource. For more information, see Proton resources and tagging in the Proton Administrator Guide or Proton User Guide. List tags for a resource. For more information, see Proton resources and tagging in the Proton User Guide. Notify Proton of status changes to a provisioned resource when you use self-managed provisioning. For more information, see Self-managed provisioning in the Proton Administrator Guide. Notify Proton of status changes to a provisioned resource when you use self-managed provisioning. For more information, see Self-managed provisioning in the Proton User Guide. In a management account, reject an environment account connection from another environment account. After you reject an environment account connection request, you can't accept or use the rejected environment account connection. You can’t reject an environment account connection that's connected to an environment. For more information, see Environment account connections in the Proton Administrator guide. In a management account, reject an environment account connection from another environment account. After you reject an environment account connection request, you can't accept or use the rejected environment account connection. You can’t reject an environment account connection that's connected to an environment. For more information, see Environment account connections in the Proton User guide. Tag a resource. A tag is a key-value pair of metadata that you associate with an Proton resource. For more information, see Proton resources and tagging in the Proton Administrator Guide or Proton User Guide. Tag a resource. A tag is a key-value pair of metadata that you associate with an Proton resource. For more information, see Proton resources and tagging in the Proton User Guide. Remove a customer tag from a resource. A tag is a key-value pair of metadata associated with an Proton resource. For more information, see Proton resources and tagging in the Proton Administrator Guide or Proton User Guide. Remove a customer tag from a resource. A tag is a key-value pair of metadata associated with an Proton resource. For more information, see Proton resources and tagging in the Proton User Guide. Update the Proton service pipeline role or repository settings. Update Proton settings that are used for multiple services in the Amazon Web Services account. Update a component. There are a few modes for updating a component. The You can't update a component while its deployment status, or the deployment status of a service instance attached to it, is For more information about components, see Proton components in the Proton Administrator Guide. Update a component. There are a few modes for updating a component. The You can't update a component while its deployment status, or the deployment status of a service instance attached to it, is For more information about components, see Proton components in the Proton User Guide. Update an environment. 
If the environment is associated with an environment account connection, don't update or include the You can only update to a new environment account connection if that connection was created in the same environment account that the current environment account connection was created in. The account connection must also be associated with the current environment. If the environment isn't associated with an environment account connection, don't update or include the You can update either the If the environment was configured for Amazon Web Services-managed provisioning, omit the If the environment was configured for self-managed provisioning, specify the For more information, see Environments and Provisioning methods in the Proton Administrator Guide. There are four modes for updating an environment. The In this mode, a deployment doesn't occur. Only the requested metadata parameters are updated. In this mode, the environment is deployed and updated with the new spec that you provide. Only requested parameters are updated. Don’t include minor or major version parameters when you use this In this mode, the environment is deployed and updated with the published, recommended (latest) minor version of the current major version in use, by default. You can also specify a different minor version of the current major version in use. In this mode, the environment is deployed and updated with the published, recommended (latest) major and minor version of the current template, by default. You can also specify a different major version that's higher than the major version in use and a minor version. Update an environment. If the environment is associated with an environment account connection, don't update or include the You can only update to a new environment account connection if that connection was created in the same environment account that the current environment account connection was created in. The account connection must also be associated with the current environment. If the environment isn't associated with an environment account connection, don't update or include the You can update either the If the environment was configured for Amazon Web Services-managed provisioning, omit the If the environment was configured for self-managed provisioning, specify the For more information, see Environments and Provisioning methods in the Proton User Guide. There are four modes for updating an environment. The In this mode, a deployment doesn't occur. Only the requested metadata parameters are updated. In this mode, the environment is deployed and updated with the new spec that you provide. Only requested parameters are updated. Don’t include minor or major version parameters when you use this In this mode, the environment is deployed and updated with the published, recommended (latest) minor version of the current major version in use, by default. You can also specify a different minor version of the current major version in use. In this mode, the environment is deployed and updated with the published, recommended (latest) major and minor version of the current template, by default. You can also specify a different major version that's higher than the major version in use and a minor version. In an environment account, update an environment account connection to use a new IAM role. For more information, see Environment account connections in the Proton Administrator guide. In an environment account, update an environment account connection to use a new IAM role. 
For more information, see Environment account connections in the Proton User guide. Edit a service description or use a spec to add and delete service instances. Existing service instances and the service pipeline can't be edited using this API. They can only be deleted. Use the Edit the You can't delete a service instance (remove it from the spec) if it has an attached component. For more information about components, see Proton components in the Proton Administrator Guide. Edit a service description or use a spec to add and delete service instances. Existing service instances and the service pipeline can't be edited using this API. They can only be deleted. Use the Edit the You can't delete a service instance (remove it from the spec) if it has an attached component. For more information about components, see Proton components in the Proton User Guide. Update a service instance. There are a few modes for updating a service instance. The You can't update a service instance while its deployment status, or the deployment status of a component attached to it, is For more information about components, see Proton components in the Proton Administrator Guide. Update a service instance. There are a few modes for updating a service instance. The You can't update a service instance while its deployment status, or the deployment status of a component attached to it, is For more information about components, see Proton components in the Proton User Guide. Update template sync configuration parameters, except for the Update template sync configuration parameters, except for the The repository configured in the Amazon Web Services account for pipeline provisioning. Required it if you have environments configured for self-managed provisioning with services that include pipelines. The linked repository for pipeline provisioning. Required if you have environments configured for self-managed provisioning with services that include pipelines. A linked repository is a repository that has been registered with Proton. For more information, see CreateRepository. The Amazon Resource Name (ARN) of the service role you want to use for provisioning pipelines. Assumed by Proton for Amazon Web Services-managed provisioning, and by customer-owned automation for self-managed provisioning. The Proton pipeline service role and repository data shared across the Amazon Web Services account. Proton settings that are used for multiple services in the Amazon Web Services account. The service spec that the component uses to access service inputs. Provided when a component is attached to a service instance. Detailed data of an Proton component resource. For more information about components, see Proton components in the Proton Administrator Guide. Detailed data of an Proton component resource. For more information about components, see Proton components in the Proton User Guide. The name of the service that Summary data of an Proton component resource. For more information about components, see Proton components in the Proton Administrator Guide. Summary data of an Proton component resource. For more information about components, see Proton components in the Proton User Guide. An optional list of metadata items that you can associate with the Proton component. A tag is a key-value pair. For more information, see Proton resources and tagging in the Proton Administrator Guide or Proton User Guide. An optional list of metadata items that you can associate with the Proton component. A tag is a key-value pair. 
For more information, see Proton resources and tagging in the Proton User Guide.

The Amazon Resource Name (ARN) of the IAM service role that Proton uses when provisioning directly defined components in the associated environment account. It determines the scope of infrastructure that a component can provision in the account. You must specify componentRoleArn to allow directly defined components to be associated with any environments running in the account. For more information about components, see Proton components in the Proton User Guide.

An optional list of metadata items that you can associate with the Proton environment account connection. A tag is a key-value pair. For more information, see Proton resources and tagging in the Proton User Guide.

The Amazon Resource Name (ARN) of the IAM service role that Proton uses when provisioning directly defined components in this environment. It determines the scope of infrastructure that a component can provision. You must specify componentRoleArn to allow directly defined components to be associated with this environment. For more information about components, see Proton components in the Proton User Guide.

The ID of the environment account connection that you provide if you're provisioning your environment infrastructure resources to an environment account. For more information, see Environment account connections in the Proton User Guide. To use Amazon Web Services-managed provisioning for the environment, specify either the environmentAccountConnectionId or protonServiceRoleArn parameter and omit the provisioningRepository parameter.

The linked repository that you use to host your rendered infrastructure templates for self-managed provisioning. A linked repository is a repository that has been registered with Proton. For more information, see CreateRepository. To use self-managed provisioning for the environment, specify this parameter and omit the environmentAccountConnectionId and protonServiceRoleArn parameters.

A YAML formatted string that provides inputs as defined in the environment template bundle schema file. For more information, see Environments in the Proton User Guide.

An optional list of metadata items that you can associate with the Proton environment. A tag is a key-value pair. For more information, see Proton resources and tagging in the Proton User Guide.
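To show how the tagging and provisioning parameters fit together, here is a sketch of environment creation. All names, IDs, and the spec are placeholders; because an environment account connection ID is used, provisioningRepository and protonServiceRoleArn are omitted:

    import software.amazon.awssdk.services.proton.ProtonClient;
    import software.amazon.awssdk.services.proton.model.Tag;

    public class CreateEnvironmentExample {
        public static void main(String[] args) {
            try (ProtonClient proton = ProtonClient.create()) {
                proton.createEnvironment(r -> r
                        .name("dev-env")
                        .templateName("my-env-template")
                        .templateMajorVersion("1")
                        .spec("proton: EnvironmentSpec\n...")                 // placeholder YAML spec
                        .environmentAccountConnectionId("aec-placeholder-id") // hypothetical ID
                        .tags(Tag.builder().key("team").value("platform").build()));
            }
        }
    }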
The name of the environment template. For more information, see Environment Templates in the Proton User Guide.

An optional list of metadata items that you can associate with the Proton environment template. A tag is a key-value pair. For more information, see Proton resources and tagging in the Proton User Guide.

An optional list of metadata items that you can associate with the Proton environment template version. A tag is a key-value pair. For more information, see Proton resources and tagging in the Proton User Guide.

The Amazon Resource Name (ARN) of your AWS CodeStar connection that connects Proton to your repository provider account. For more information, see Setting up for Proton in the Proton User Guide.

An optional list of metadata items that you can associate with the Proton repository. A tag is a key-value pair. For more information, see Proton resources and tagging in the Proton User Guide.

The repository link's detail data that's returned by Proton.

The Amazon Resource Name (ARN) of the repository connection. For more information, see Setting up an AWS CodeStar connection in the Proton User Guide. Don't include this parameter if your service template doesn't include a service pipeline.

A link to a spec file that provides inputs as defined in the service template bundle schema file. The spec file is in YAML format. Don't include pipeline inputs in the spec if your service template doesn't include a service pipeline. For more information, see Create a service in the Proton User Guide.
An optional list of metadata items that you can associate with the Proton service. A tag is a key-value pair. For more information, see Proton resources and tagging in the Proton User Guide.

By default, Proton provides a service pipeline for your service. When this parameter is included, it indicates that a Proton service pipeline isn't provided for your service. After it's included, it can't be changed. For more information, see Template bundles in the Proton User Guide.

An optional list of metadata items that you can associate with the Proton service template. A tag is a key-value pair. For more information, see Proton resources and tagging in the Proton User Guide.

An array of supported component sources. Components with supported sources can be attached to service instances based on this service template version. For more information about components, see Proton components in the Proton User Guide.

An optional list of metadata items that you can associate with the Proton service template version. A tag is a key-value pair. For more information, see Proton resources and tagging in the Proton User Guide.

The repository branch for your template.

The repository name (for example, myrepos/myrepo).

The repository name.

The deleted repository link's detail data that's returned by Proton.

The Amazon Resource Name (ARN) of the IAM service role that Proton uses when provisioning directly defined components in this environment. It determines the scope of infrastructure that a component can provision. The environment must have a componentRoleArn to allow directly defined components to be associated with the environment. For more information about components, see Proton components in the Proton User Guide.

The linked repository that you use to host your rendered infrastructure templates for self-managed provisioning. A linked repository is a repository that has been registered with Proton. For more information, see CreateRepository.
The Amazon Resource Name (ARN) of the IAM service role that Proton uses when provisioning directly defined components in the associated environment account. It determines the scope of infrastructure that a component can provision in the account. The environment account connection must have a componentRoleArn to allow directly defined components to be associated with any environments running in the account. For more information about components, see Proton components in the Proton User Guide.

The name of the environment template a version of which you want to get detailed data for.

The repository link's detail data that's returned by Proton.

An array of repository links.

The resource provisioning engine. For more information, see Self-managed provisioning in the Proton User Guide.

Detail data for a provisioned resource.

The Amazon Resource Name (ARN) of the linked repository.

The Amazon Resource Name (ARN) of your AWS CodeStar connection that connects Proton to your repository provider account.

The repository provider.
Detailed data of a linked repository—a repository that has been registered with Proton.

The Amazon Resource Name (ARN) of the linked repository branch.

Detail data for a linked repository branch.

Detail input data for a linked repository branch.

Summary data of a linked repository—a repository that has been registered with Proton.

The resource that is synced to.

A repository sync definition.

The Amazon Resource Name (ARN) of the repository connection. For more information, see Setting up an AWS CodeStar connection in the Proton User Guide.

A quota was exceeded. For more information, see Proton Quotas in the Proton User Guide.

An array of supported component sources. Components with supported sources can be attached to service instances based on this service template version. For more information about components, see Proton components in the Proton User Guide.

The repository name (for example, myrepos/myrepo).

Set to true to remove a previously configured pipeline repository from the account settings.

A linked repository for pipeline provisioning. Specify it if you have environments configured for self-managed provisioning with services that include pipelines. A linked repository is a repository that has been registered with Proton. For more information, see CreateRepository. To remove a previously configured repository, set deletePipelineProvisioningRepository to true, and don't set pipelineProvisioningRepository.

The Amazon Resource Name (ARN) of the service role you want to use for provisioning pipelines. Assumed by Proton for Amazon Web Services-managed provisioning, and by customer-owned automation for self-managed provisioning. To remove a previously configured ARN, specify an empty string.

The Amazon Resource Name (ARN) of the IAM service role that Proton uses when provisioning directly defined components in the associated environment account. It determines the scope of infrastructure that a component can provision in the account.
The environment account connection must have a componentRoleArn to allow directly defined components to be associated with any environments running in the account. For more information about components, see Proton components in the Proton User Guide.

The Amazon Resource Name (ARN) of the IAM service role that Proton uses when provisioning directly defined components in this environment. It determines the scope of infrastructure that a component can provision. The environment must have a componentRoleArn to allow directly defined components to be associated with the environment. For more information about components, see Proton components in the Proton User Guide.

The linked repository that you use to host your rendered infrastructure templates for self-managed provisioning. A linked repository is a repository that has been registered with Proton. For more information, see CreateRepository.

Lists the service instances to add and the existing service instances to remain. Omit the existing service instances to delete from the list. Don't include edits to the existing service instances or pipeline. For more information, see Edit a service in the Proton User Guide.

An array of supported component sources. Components with supported sources can be attached to service instances based on this service template version. A change to supportedComponentSources doesn't impact existing component attachments to instances based on this template version; a change only affects later associations. For more information about components, see Proton components in the Proton User Guide.

The repository branch for your template.

The repository name (for example, myrepos/myrepo).

This is the Proton Service API Reference. It provides descriptions, syntax, and usage examples for each of the actions and data types for the Proton service. The documentation for each action shows the Query API request parameters and the XML response. Alternatively, you can use the Amazon Web Services CLI to access an API. For more information, see the Amazon Web Services Command Line Interface User Guide.

The Proton service is a two-pronged automation framework. Administrators create service templates to provide standardized infrastructure and deployment tooling for serverless and container based applications. Developers, in turn, select from the available service templates to automate their application or service deployments.

Because administrators define the infrastructure and tooling that Proton deploys and manages, they need permissions to use all of the listed API operations. When developers select a specific infrastructure and tooling set, Proton deploys their applications.
To monitor their applications that are running on Proton, developers need permissions to the service create, list, update, and delete API operations and the service instance list and update API operations.

To learn more about Proton, see the Proton User Guide.

Ensuring Idempotency

When you make a mutating API request, the request typically returns a result before the asynchronous workflows of the operation are complete. Operations might also time out or encounter other server issues before they're complete, even if the request already returned a result. This might make it difficult to determine whether the request succeeded. Moreover, you might need to retry the request multiple times to ensure that the operation completes successfully. However, if the original request and the subsequent retries are successful, the operation occurs multiple times. This means that you might create more resources than you intended.

Idempotency ensures that an API request action completes no more than one time. With an idempotent request, if the original request action completes successfully, any subsequent retries complete successfully without performing any further actions. However, the result might contain updated information, such as the current creation status. The following lists of APIs are grouped according to methods that ensure idempotency.

Idempotent create APIs with a client token

The API actions in this list support idempotency with the use of a client token. The corresponding Amazon Web Services CLI commands also support idempotency using a client token. A client token is a unique, case-sensitive string of up to 64 ASCII characters. To make an idempotent API request using one of these actions, specify a client token in the request. We recommend that you don't reuse the same client token for other API requests. If you don't provide a client token for these APIs, a default client token is automatically provided by SDKs.

Given a request action that has succeeded:

If you retry the request using the same client token and the same parameters, the retry succeeds without performing any further actions other than returning the original resource detail data in the response.

If you retry the request using the same client token, but one or more of the parameters are different, the retry throws a ValidationException with an IdempotentParameterMismatch error.

Client tokens expire eight hours after a request is made. If you retry the request with the expired token, a new resource is created.

If the original resource is deleted and you retry the request, a new resource is created.

Idempotent create APIs with a client token: CreateEnvironmentTemplateVersion, CreateServiceTemplateVersion, CreateEnvironmentAccountConnection.

Idempotent create APIs

Given a request action that has succeeded:

If you retry the request with an API from this group, and the original resource hasn't been modified, the retry succeeds without performing any further actions other than returning the original resource detail data in the response.

If the original resource has been modified, the retry throws a ConflictException.

If you retry with different input parameters, the retry throws a ValidationException with an IdempotentParameterMismatch error.

Idempotent create APIs: CreateEnvironmentTemplate, CreateServiceTemplate, CreateEnvironment, CreateService.

Idempotent delete APIs

Given a request action that has succeeded:

When you retry the request with an API from this group and the resource was deleted, its metadata is returned in the response.
If you retry and the resource doesn't exist, the response is empty. In both cases, the retry succeeds.

Idempotent delete APIs: DeleteEnvironmentTemplate, DeleteEnvironmentTemplateVersion, DeleteServiceTemplate, DeleteServiceTemplateVersion, DeleteEnvironmentAccountConnection.

Asynchronous idempotent delete APIs

Given a request action that has succeeded:

If you retry the request with an API from this group while the original request's delete operation status is DELETE_IN_PROGRESS, the retry returns the resource detail data in the response.

If the original request delete operation is complete, a retry returns an empty response.

Asynchronous idempotent delete APIs: DeleteEnvironment, DeleteService.
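A sketch of idempotent creation with a client token, assuming the Java SDK's Proton client; the template name, bucket, and key are placeholders, and the source union is built through the consumer-builder style:

    import java.util.UUID;
    import software.amazon.awssdk.services.proton.ProtonClient;

    public class ClientTokenExample {
        public static void main(String[] args) {
            try (ProtonClient proton = ProtonClient.create()) {
                // Retrying with the same token and parameters returns the original
                // resource; the token expires eight hours after the request.
                String clientToken = UUID.randomUUID().toString();
                proton.createEnvironmentTemplateVersion(r -> r
                        .templateName("my-env-template")
                        .clientToken(clientToken)
                        .source(s -> s.s3(o -> o.bucket("my-template-bucket")
                                               .key("template.tar.gz"))));
            }
        }
    }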
Creates a parallel data resource in Amazon Translate by importing an input file from Amazon S3. Parallel data files contain examples that show how you want segments of text to be translated. By adding parallel data, you can influence the style, tone, and word choice in your translation output.

Creates or updates a custom terminology, depending on whether one already exists for the given terminology name. Importing a terminology with the same name as an existing one will merge the terminologies based on the chosen merge strategy. The only supported merge strategy is OVERWRITE, where the imported terminology overwrites the existing terminology of the same name. If you import a terminology that overwrites an existing one, the new terminology takes up to 10 minutes to fully propagate. After that, translations have access to the new terminology.

Provides a list of your parallel data resources in Amazon Translate.

Stops an asynchronous batch translation job that is in progress. If the job's state is IN_PROGRESS, the job will be marked for termination and put into the STOP_REQUESTED state. If the job completes before it can be stopped, it is put into the COMPLETED state; otherwise, the job is put into the STOPPED state. Asynchronous batch translation jobs are started with the StartTextTranslationJob operation. You can use the DescribeTextTranslationJob or ListTextTranslationJobs operations to get a batch translation job's JobId.

Translates input text from the source language to the target language. For a list of available languages and language codes, see what-is-languages.

A unique identifier for the request. This token is automatically generated when you use Amazon Translate through an AWS SDK.
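A minimal sketch of the real-time translation call; the language codes and text are examples:

    import software.amazon.awssdk.services.translate.TranslateClient;
    import software.amazon.awssdk.services.translate.model.TranslateTextResponse;

    public class TranslateTextExample {
        public static void main(String[] args) {
            try (TranslateClient translate = TranslateClient.create()) {
                TranslateTextResponse response = translate.translateText(r -> r
                        .text("Hello, world")
                        .sourceLanguageCode("en")
                        .targetLanguageCode("es"));
                System.out.println(response.translatedText());
            }
        }
    }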
The encryption key for the custom terminology being imported.

You have made too many requests within a short period of time. Wait for a short time and then try your request again.

Amazon Translate does not support translation from the language of the source text into the requested target language. For more information, see how-to-error-msg.

Specifies whether users can cache their credentials on the Amazon WorkSpaces client. When enabled, users can choose to reconnect to their WorkSpaces without re-entering their credentials.

Specifies whether users can upload diagnostic log files of Amazon WorkSpaces client directly to WorkSpaces to troubleshoot issues when using the WorkSpaces client. When enabled, the log files will be sent to WorkSpaces automatically and will be applied to all users in the specified directory.

Describes an Amazon WorkSpaces client.

The identifier of the Amazon Web Services account that owns the image.

Returns detailed metadata about the specified ACM certificate. If you have just created a certificate using the RequestCertificate action, there is a delay of several seconds before you can retrieve information about it.

Imports a certificate into Certificate Manager (ACM) to use with services that are integrated with ACM. Note that integrated services allow only certificate types and keys they support to be associated with their resources. Further, their support differs depending on whether the certificate is imported into IAM or into ACM. For more information, see the documentation for each service. For more information about importing certificates into ACM, see Importing Certificates in the Certificate Manager User Guide.

ACM does not provide managed renewal for certificates that you import.

Note the following guidelines when importing third party certificates:

You must enter the private key that matches the certificate you are importing.

The private key must be unencrypted. You cannot import a private key that is protected by a password or a passphrase.

The private key must be no larger than 5 KB (5,120 bytes).

If the certificate you are importing is not self-signed, you must enter its certificate chain. If a certificate chain is included, the issuer must be the subject of one of the certificates in the chain.

The certificate, private key, and certificate chain must be PEM-encoded.

The current time must be between the Not Before and Not After certificate fields.

The Issuer field must not be empty. The OCSP authority URL, if present, must not exceed 1000 characters.

To import a new certificate, omit the CertificateArn argument. Include this argument only when you want to replace a previously imported certificate.

When you import a certificate by using the CLI, you must specify the certificate, the certificate chain, and the private key by their file names preceded by fileb://. When you import a certificate by using an SDK, you must specify the certificate, the certificate chain, and the private key files in the manner required by the programming language you're using.

The cryptographic algorithm of an imported certificate must match the algorithm of the signing CA. For example, if the signing CA key type is RSA, then the certificate key type must also be RSA.

This operation returns the Amazon Resource Name (ARN) of the imported certificate.
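A sketch of the import call from Java; the file paths are placeholders, the key must be unencrypted PEM, and the chain is only needed for certificates that aren't self-signed:

    import java.nio.file.Files;
    import java.nio.file.Paths;
    import software.amazon.awssdk.core.SdkBytes;
    import software.amazon.awssdk.services.acm.AcmClient;

    public class ImportCertificateExample {
        public static void main(String[] args) throws Exception {
            try (AcmClient acm = AcmClient.create()) {
                String arn = acm.importCertificate(r -> r
                        .certificate(SdkBytes.fromByteArray(Files.readAllBytes(Paths.get("cert.pem"))))
                        .privateKey(SdkBytes.fromByteArray(Files.readAllBytes(Paths.get("key.pem"))))
                        .certificateChain(SdkBytes.fromByteArray(Files.readAllBytes(Paths.get("chain.pem")))))
                    .certificateArn();
                System.out.println("Imported: " + arn);
            }
        }
    }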
Renews an eligible ACM certificate. At this time, only exported private certificates can be renewed with this operation. In order to renew your ACM Private CA certificates with ACM, you must first grant the ACM service principal permission to do so. For more information, see Testing Managed Renewal in the ACM User Guide.

Requests an ACM certificate for use with other Amazon Web Services services. To request an ACM certificate, you must specify a fully qualified domain name (FQDN) in the DomainName parameter. You can also specify additional FQDNs in the SubjectAlternativeNames parameter.

If you are requesting a private certificate, domain validation is not required. If you are requesting a public certificate, each domain name that you specify must be validated to verify that you own or control the domain. You can use DNS validation or email validation. We recommend that you use DNS validation. ACM issues public certificates after receiving approval from the domain owner.

ACM behavior differs from the RFC 6125 (https://tools.ietf.org/html/rfc6125#appendix-B.2) specification of the certificate validation process. ACM first checks for a Subject Alternative Name, and, if it finds one, ignores the common name (CN).

After successful completion of the RequestCertificate action, there is a delay of several seconds before you can retrieve information about the new certificate.
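A sketch of requesting a public certificate with the recommended DNS validation; the domain names are placeholders:

    import software.amazon.awssdk.services.acm.AcmClient;
    import software.amazon.awssdk.services.acm.model.ValidationMethod;

    public class RequestCertificateExample {
        public static void main(String[] args) {
            try (AcmClient acm = AcmClient.create()) {
                String arn = acm.requestCertificate(r -> r
                        .domainName("example.com")
                        .subjectAlternativeNames("*.example.com")
                        .validationMethod(ValidationMethod.DNS))
                    .certificateArn();
                System.out.println("Requested: " + arn);
            }
        }
    }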
The date and time when the certificate was imported. This value exists only when the certificate type is IMPORTED.

The status of the certificate. A certificate enters status PENDING_VALIDATION upon being requested, unless it fails for any of the reasons given in the troubleshooting topic Certificate request fails. ACM makes repeated attempts to validate a certificate for 72 hours and then times out. If a certificate shows status FAILED or VALIDATION_TIMED_OUT, delete the request, correct the issue with DNS validation or Email validation, and try again. If validation succeeds, the certificate enters status ISSUED.

The reason the certificate request failed. This value exists only when the certificate status is FAILED.

The source of the certificate. For certificates provided by ACM, this value is AMAZON_ISSUED.

The Amazon Resource Name (ARN) of the private certificate authority (CA) that issued the certificate. This has the following format: arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

Fully qualified domain name (FQDN), such as www.example.com or example.com, for the certificate.

One or more domain names (subject alternative names) included in the certificate. This list contains the domain names that are bound to the public key that is contained in the certificate. The subject alternative names include the canonical domain name (CN) of the certificate and additional domain names that can be used to connect to the website. When called by ListCertificates, this parameter will only return the first 100 subject alternative names included in the certificate. To display the full list of subject alternative names, use DescribeCertificate.

When called by ListCertificates, indicates whether the full list of subject alternative names has been included in the response. If false, the response includes all of the subject alternative names included in the certificate. If true, the response only includes the first 100 subject alternative names included in the certificate. To display the full list of subject alternative names, use DescribeCertificate.

The source of the certificate.
For certificates provided by ACM, this value is AMAZON_ISSUED.

The algorithm that was used to generate the public-private key pair.

A list of Key Usage X.509 v3 extension objects. Each object is a string value that identifies the purpose of the public key contained in the certificate. Possible extension values include DIGITAL_SIGNATURE, KEY_ENCIPHERMENT, NON_REPUDIATION, and more.

Contains a list of Extended Key Usage X.509 v3 extension objects. Each object specifies a purpose for which the certificate public key can be used and consists of a name and an object identifier (OID).

Indicates whether the certificate is currently in use by any Amazon Web Services resources.

Indicates whether the certificate has been exported. This value exists only when the certificate type is PRIVATE.

Specifies whether the certificate is eligible for renewal. At this time, only exported private certificates can be renewed with the RenewCertificate command.

The time before which the certificate is not valid.

The time after which the certificate is not valid.

The time at which the certificate was requested.

The time at which the certificate was issued. This value exists only when the certificate type is AMAZON_ISSUED.

The date and time when the certificate was imported. This value exists only when the certificate type is IMPORTED.

The time at which the certificate was revoked. This value exists only when the certificate status is REVOKED.

This structure is returned in the response object of ListCertificates action.

Passphrase to associate with the encrypted exported private key. When creating your passphrase, you can use any ASCII character except #, $, or %. If you want to later decrypt the private key, you must have the passphrase. You can use the following OpenSSL command to decrypt a private key (after entering the command, you are prompted for the passphrase): openssl rsa -in encrypted_key.pem -out decrypted_key.pem

Specify one or more algorithms that can be used to generate key pairs. Default filtering returns only RSA_2048 certificates. To return other certificate types, provide the desired type signatures in a comma-separated list.

This structure can be used in the ListCertificates action to filter the output of the certificate list.

Use this parameter when paginating results to specify the maximum number of items to return in the response. If additional items exist beyond the number you specify, the NextToken element is sent in the response.

Specifies the field to sort results by. If you specify SortBy, you must also specify SortOrder.

Specifies the order of sorted results. If you specify SortOrder, you must also specify SortBy.

Fully qualified domain name (FQDN), such as www.example.com, that you want to secure with an ACM certificate. Use an asterisk (*) to create a wildcard certificate that protects several sites in the same domain. For example, *.example.com protects www.example.com, site.example.com, and images.example.com.
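A sketch of exporting a private certificate with a passphrase; the ARN and passphrase are placeholders, and per the note above the passphrase should avoid #, $, and %:

    import software.amazon.awssdk.core.SdkBytes;
    import software.amazon.awssdk.services.acm.AcmClient;
    import software.amazon.awssdk.services.acm.model.ExportCertificateResponse;

    public class ExportCertificateExample {
        public static void main(String[] args) {
            try (AcmClient acm = AcmClient.create()) {
                ExportCertificateResponse response = acm.exportCertificate(r -> r
                        .certificateArn("arn:aws:acm:us-east-1:111122223333:certificate/placeholder")
                        .passphrase(SdkBytes.fromUtf8String("my-passphrase")));
                // The returned private key is encrypted with the passphrase above.
                System.out.println(response.certificate());
            }
        }
    }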
In compliance with RFC 5280, the length of the domain name (technically, the Common Name) that you provide cannot exceed 64 octets (characters), including periods. To add a longer domain name, specify it in the Subject Alternative Name field, which supports names up to 253 octets in length.

The Amazon Resource Name (ARN) of the private certificate authority (CA) that will be used to issue the certificate. If you do not provide an ARN and you are trying to request a private certificate, ACM will attempt to issue a public certificate. For more information about private CAs, see the Certificate Manager Private Certificate Authority user guide. The ARN must have the following form: arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

You can use Certificate Manager (ACM) to manage SSL/TLS certificates for your Amazon Web Services-based websites and applications. For more information about using ACM, see the Certificate Manager User Guide.

Creates an Amazon FSx for Lustre data repository association (DRA). A data repository association is a link between a directory on the file system and an Amazon S3 bucket or prefix. You can have a maximum of 8 data repository associations on a file system. Data repository associations are supported only for file systems with the Persistent_2 deployment type.

Each data repository association must have a unique Amazon FSx file system directory and a unique S3 bucket or prefix associated with it. You can configure a data repository association for automatic import only, for automatic export only, or for both. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.

Creates an Amazon FSx for Lustre data repository task. You use data repository tasks to perform bulk operations between your Amazon FSx file system and its linked data repositories. An example of a data repository task is exporting any data and metadata changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) from your FSx file system to a linked data repository. A CreateDataRepositoryTask operation will fail if a data repository is not linked to the FSx file system.
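A sketch of linking a file system directory to an S3 prefix; the IDs and paths are placeholders, and the auto-export policy mirrors the NEW/CHANGED/DELETED event types described later in this section:

    import software.amazon.awssdk.services.fsx.FSxClient;
    import software.amazon.awssdk.services.fsx.model.AutoExportPolicy;
    import software.amazon.awssdk.services.fsx.model.EventType;
    import software.amazon.awssdk.services.fsx.model.S3DataRepositoryConfiguration;

    public class CreateDataRepositoryAssociationExample {
        public static void main(String[] args) {
            try (FSxClient fsx = FSxClient.create()) {
                fsx.createDataRepositoryAssociation(r -> r
                        .fileSystemId("fs-0123456789abcdef0")
                        .fileSystemPath("/ns1")
                        .dataRepositoryPath("s3://my-bucket/my-prefix/")
                        .batchImportMetaDataOnCreate(true) // run a metadata import task after creation
                        .s3(S3DataRepositoryConfiguration.builder()
                                .autoExportPolicy(AutoExportPolicy.builder()
                                        .events(EventType.NEW, EventType.CHANGED, EventType.DELETED)
                                        .build())
                                .build()));
            }
        }
    }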
Creates a new Amazon File Cache resource. You can use this operation with a client request token in the request that Amazon File Cache uses to ensure idempotent creation. If a cache with the specified client request token exists and the parameters match, CreateFileCache returns the description of the existing cache. If a cache with the specified client request token doesn't exist, CreateFileCache does the following: Creates a new, empty Amazon File Cache resource with an assigned ID, and an initial lifecycle state of CREATING. Returns the description of the cache in JSON format. The CreateFileCache call returns while the cache's lifecycle state is still CREATING.

Creates a new, empty Amazon FSx file system. You can create the following supported Amazon FSx file systems using the CreateFileSystem API operation: Amazon FSx for Lustre, Amazon FSx for NetApp ONTAP, Amazon FSx for OpenZFS, and Amazon FSx for Windows File Server.

This operation requires a client request token in the request that Amazon FSx uses to ensure idempotent creation. This means that calling the operation multiple times with the same client request token has no effect. By using the idempotent operation, you can retry a CreateFileSystem operation without the risk of creating an extra file system. If a file system with the specified client request token exists and the parameters match, CreateFileSystem returns the description of the existing file system. If a file system with the specified client request token doesn't exist, CreateFileSystem does the following: Creates a new, empty Amazon FSx file system with an assigned ID, and an initial lifecycle state of CREATING. Returns the description of the file system in JSON format. The CreateFileSystem call returns while the file system's lifecycle state is still CREATING.

Deletes a data repository association on an Amazon FSx for Lustre file system. Deleting the data repository association unlinks the file system from the Amazon S3 bucket. When deleting a data repository association, you have the option of deleting the data in the file system that corresponds to the data repository association. Data repository associations are supported only for file systems with the Persistent_2 deployment type.

Deletes an Amazon File Cache resource. After deletion, the cache no longer exists, and its data is gone. The DeleteFileCache operation returns while the cache has the DELETING status. The data in a deleted cache is also deleted and can't be recovered by any means.
Returns the description of specific Amazon FSx for Lustre or Amazon File Cache data repository associations, if one or more association IDs are provided in the request, or if filters are used in the request. You can use filters to narrow the response to include just data repository associations for specific file systems (use the file-system-id filter with the ID of the file system) or caches (use the file-cache-id filter with the ID of the cache). When retrieving all data repository associations, you can paginate the response by using the optional MaxResults parameter to limit the number of data repository associations returned in a response. If more data repository associations remain, a NextToken value is returned in the response, and you can use it in a later request to retrieve the next set of descriptions.

Returns the description of specific Amazon FSx for Lustre or Amazon File Cache data repository tasks, if one or more task IDs are provided in the request, or if filters are used in the request. When retrieving all tasks, you can paginate the response by using the optional MaxResults parameter to limit the number of tasks returned in a response.

Returns the description of a specific Amazon File Cache resource, if a FileCacheIds value is provided for that cache. Otherwise, it returns descriptions of all caches owned by your Amazon Web Services account. When retrieving all cache descriptions, you can optionally specify the MaxResults parameter to limit the number of descriptions in a response. This operation is used in an iterative process to retrieve a list of your cache descriptions. When using this operation, keep the following in mind: The implementation might return fewer than MaxResults cache descriptions while still including a NextToken value. The order of caches returned in the response of one DescribeFileCaches call and the order of caches returned across the responses of a multicall iteration is unspecified.

Updates the configuration of an existing data repository association on an Amazon FSx for Lustre file system. Data repository associations are supported only for file systems with the Persistent_2 deployment type.

Updates the configuration of an existing Amazon File Cache resource. You can update multiple properties in a single request.

The AutoExportPolicy can have the following event values: NEW, CHANGED, and DELETED. You can define any combination of event types for your AutoExportPolicy.

Describes a data repository association's automatic export policy. The AutoExportPolicy defines the types of updated objects on the file system that will be automatically exported to the data repository.

The AutoImportPolicy can have the following event values: NEW, CHANGED, and DELETED. You can define any combination of event types for your AutoImportPolicy.

Describes the data repository association's automatic import policy. The AutoImportPolicy defines how Amazon FSx keeps your file metadata and directory listings up to date by importing changes to your Amazon FSx for Lustre file system as you modify objects in a linked S3 bucket.

(Optional) An idempotency token for resource creation, in a string of up to 64 ASCII characters. This token is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.

A path on the file system that points to a high-level directory (such as /ns1/) or subdirectory (such as /ns1/subdir/) that will be mapped 1-1 with the data repository path. This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory. If you specify only a forward slash (/) as the file system path, you can link only one data repository to the file system, and you can only specify "/" as the file system path for the first data repository associated with a file system.
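The MaxResults/NextToken contract described above translates into the usual manual pagination loop; a sketch, with the page size as a placeholder:

    import software.amazon.awssdk.services.fsx.FSxClient;
    import software.amazon.awssdk.services.fsx.model.DescribeDataRepositoryAssociationsResponse;

    public class DescribeAssociationsExample {
        public static void main(String[] args) {
            try (FSxClient fsx = FSxClient.create()) {
                String nextToken = null;
                do {
                    final String token = nextToken;
                    DescribeDataRepositoryAssociationsResponse page =
                            fsx.describeDataRepositoryAssociations(r -> r
                                    .maxResults(10)
                                    .nextToken(token)); // null on the first call
                    page.associations().forEach(a ->
                            System.out.println(a.associationId() + " " + a.lifecycleAsString()));
                    nextToken = page.nextToken(); // null when no pages remain
                } while (nextToken != null);
            }
        }
    }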
Specifies the amount of data to release, in GiB, by an Amazon File Cache AUTO_RELEASE_DATA task that automatically releases files from the cache.

Provisions the amount of read and write throughput for each 1 tebibyte (TiB) of cache storage capacity, in MB/s/TiB. The only supported value is 1000.

Specifies the cache deployment type, which must be CACHE_1.

The configuration for a Lustre MDT (Metadata Target) storage volume.

The Amazon File Cache configuration for the cache that you are creating.

An idempotency token for resource creation, in a string of up to 64 ASCII characters. This token is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK. By using the idempotent operation, you can retry a CreateFileCache operation without the risk of creating an extra cache.

The type of cache that you're creating, which must be LUSTRE.

Sets the Lustre version for the cache that you're creating, which must be 2.12.

The storage capacity of the cache in gibibytes (GiB). Valid values are 1200 GiB, 2400 GiB, and increments of 2400 GiB.

A list of IDs specifying the security groups to apply to all network interfaces created for Amazon File Cache access. This list isn't returned in later requests to describe the cache.

A boolean flag indicating whether tags for the cache should be copied to data repository associations. This value defaults to false.

Specifies the ID of the Key Management Service (KMS) key to use for encrypting data on an Amazon File Cache. If a KmsKeyId isn't specified, the Amazon FSx-managed KMS key for your account is used.

The configuration for the Amazon File Cache resource being created.

A list of up to 8 configurations for data repository associations (DRAs) to be created during the cache creation. The DRAs link the cache to either an Amazon S3 data repository or a Network File System (NFS) data repository that supports the NFSv3 protocol. The DRA configurations must meet the following requirements: All configurations on the list must be of the same data repository type, either all S3 or all NFS. A cache can't link to different data repository types at the same time. An NFS DRA must link to an NFS file system that supports the NFSv3 protocol. DRA automatic import and automatic export is not supported.

A description of the cache that was created.

The OpenZFS configuration for the file system that's being created.

Sets the storage capacity of the OpenZFS file system that you're creating from a backup, in gibibytes (GiB). Valid values are from 64 GiB up to 524,288 GiB (512 TiB). However, the value that you specify must be equal to or greater than the backup's storage capacity value. If you don't use the StorageCapacity parameter, the default is the backup's storage capacity value. If used to create a file system other than OpenZFS, you must provide a value that matches the backup's StorageCapacity value.

The request object for the CreateFileSystemFromBackup operation.

The Domain Name Service (DNS) name for the file system. You can mount your file system using its DNS name.

Describes the state of a data repository association. The lifecycle can have the following values: CREATING, AVAILABLE, MISCONFIGURED, UPDATING, DELETING, and FAILED.

A path on the Amazon FSx for Lustre file system that points to a high-level directory (such as /ns1/) or subdirectory (such as /ns1/subdir/) that will be mapped 1-1 with the data repository path. This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory. If you specify only a forward slash (/) as the file system path, you can link only one data repository to the file system.
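Pulling the cache-creation fields together, a rough sketch; the builder and enum names (FileCacheType.LUSTRE, FileCacheLustreDeploymentType.CACHE_1, and the lustreConfiguration members) are assumptions based on the field descriptions above, and the subnet ID is a placeholder:

    import software.amazon.awssdk.services.fsx.FSxClient;
    import software.amazon.awssdk.services.fsx.model.FileCacheLustreDeploymentType;
    import software.amazon.awssdk.services.fsx.model.FileCacheType;

    public class CreateFileCacheExample {
        public static void main(String[] args) {
            try (FSxClient fsx = FSxClient.create()) {
                fsx.createFileCache(r -> r
                        .fileCacheType(FileCacheType.LUSTRE)
                        .fileCacheTypeVersion("2.12")
                        .storageCapacity(1200)                 // smallest valid capacity, in GiB
                        .subnetIds("subnet-0123456789abcdef0") // placeholder subnet
                        .lustreConfiguration(l -> l
                                .deploymentType(FileCacheLustreDeploymentType.CACHE_1)
                                .perUnitStorageThroughput(1000)
                                .metadataConfiguration(m -> m.storageCapacity(2400))));
            }
        }
    }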
This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory. If you specify only a forward slash ( The path to the Amazon S3 data repository that will be linked to the file system. The path can be an S3 bucket or prefix in the format The path to the data repository that will be linked to the cache or file system. For Amazon File Cache, the path can be an NFS data repository that will be linked to the cache. The path can be in one of two formats: If you are not using the If you are using the For Amazon File Cache, the path can be an S3 bucket or prefix in the format For Amazon FSx for Lustre, the path can be an S3 bucket or prefix in the format A boolean flag indicating whether an import data repository task to import metadata should run after the data repository association is created. The task runs if this flag is set to A boolean flag indicating whether an import data repository task to import metadata should run after the data repository association is created. The task runs if this flag is set to For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system. The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB. For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system or cache. The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB. The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository. The configuration for an Amazon S3 data repository linked to an Amazon FSx for Lustre file system with a data repository association. The globally unique ID of the Amazon File Cache resource. A path on the Amazon File Cache that points to a high-level directory (such as This path specifies the directory in your cache where files will be exported from. This cache directory can be linked to only one data repository (S3 or NFS) and no other data repository can be linked to the directory. The cache path can only be set to root (/) on an NFS DRA when The cache path cannot be set to root (/) for an S3 DRA. For Amazon File Cache, a list of NFS Exports that will be linked with an NFS data repository association. All the subdirectories must be on a single NFS file system. The Export paths are in the format The configuration for an NFS data repository linked to an Amazon File Cache resource with a data repository association. The configuration of a data repository association that links an Amazon FSx for Lustre file system to an Amazon S3 bucket. 
The data repository association configuration object is returned in the response of the following operations: Data repository associations are supported only for file systems with the The configuration of a data repository association that links an Amazon FSx for Lustre file system to an Amazon S3 bucket or an Amazon File Cache resource to an Amazon S3 bucket or an NFS file system. The data repository association configuration object is returned in the response of the following operations: Data repository associations are supported only for an Amazon FSx for Lustre file system with the The lifecycle status of the data repository task, as follows: You cannot delete an FSx for Lustre file system if there are data repository tasks for the file system in the The lifecycle status of the data repository task, as follows: You cannot delete an FSx for Lustre file system if there are data repository tasks for the file system in the The type of data repository task. The The The type of data repository task. The time that Amazon FSx began processing the task. The time the system began processing the task. The time that Amazon FSx completed processing the task, populated after the task is complete. The time the system completed processing the task, populated after the task is complete. The globally unique ID of the file system. An array of paths on the Amazon FSx for Lustre file system that specify the data for the data repository task to process. For example, in an EXPORT_TO_REPOSITORY task, the paths specify which data to export to the linked data repository. (Default) If An array of paths that specify the data for the data repository task to process. For example, in an EXPORT_TO_REPOSITORY task, the paths specify which data to export to the linked data repository. (Default) If Provides the status of the number of files that the task has processed successfully and failed to process. Specifies the amount of data to release, in GiB, by an Amazon File Cache AUTO_RELEASE_DATA task that automatically releases files from the cache. The system-generated, unique ID of the cache. A description of the data repository task. You use data repository tasks to perform bulk transfer operations between your Amazon FSx file system and a linked data repository. A description of the data repository task. You use data repository tasks to perform bulk transfer operations between an Amazon FSx for Lustre file system and a linked data repository. An Amazon File Cache resource uses a task to automatically release files from the cache. The time at which the task status was last updated. The total amount of data, in GiB, released by an Amazon File Cache AUTO_RELEASE_DATA task that automatically releases files from the cache. Provides the task status showing a running total of the total number of files to be processed, the number successfully processed, and the number of files the task failed to process. The ID of the cache that's being deleted. The ID of the cache that's being deleted. The cache lifecycle for the deletion request. If the An array of one ore more data repository association descriptions. An array of one or more data repository association descriptions. IDs of the caches whose descriptions you want to retrieve (String). The response object for the The system-generated, unique ID of the cache. The type of cache, which must be The Lustre version of the cache, which must be The lifecycle status of the cache. 
The following are the possible values and what they mean: A structure providing details of any failures that occurred. The storage capacity of the cache in gibibytes (GiB). The Domain Name System (DNS) name for the cache. Specifies the ID of the Key Management Service (KMS) key to use for encrypting data on an Amazon File Cache. If a The configuration for the Amazon File Cache resource. A list of IDs of data repository associations that are associated with this cache. A description of a specific Amazon File Cache resource, which is a response object from the The system-generated, unique ID of the cache. The type of cache, which must be The Lustre version of the cache, which must be The lifecycle status of the cache. The following are the possible values and what they mean: A structure providing details of any failures that occurred. The storage capacity of the cache in gibibytes (GiB). The Domain Name System (DNS) name for the cache. Specifies the ID of the Key Management Service (KMS) key to use for encrypting data on an Amazon File Cache. If a A boolean flag indicating whether tags for the cache should be copied to data repository associations. The configuration for the Amazon File Cache resource. A list of IDs of data repository associations that are associated with this cache. The response object for the Amazon File Cache resource being created in the A path on the cache that points to a high-level directory (such as This path specifies where in your cache files will be exported from. This cache directory can be linked to only one data repository, and no data repository other can be linked to the directory. The cache path can only be set to root (/) on an NFS DRA when The cache path cannot be set to root (/) for an S3 DRA. The path to the S3 or NFS data repository that links to the cache. You must provide one of the following paths: The path can be an NFS data repository that links to the cache. The path can be in one of two formats: If you are not using the If you are using the The path can be an S3 bucket or prefix in the format A list of NFS Exports that will be linked with this data repository association. The Export paths are in the format The configuration for a data repository association that links an Amazon File Cache resource to an NFS data repository. The configuration for a data repository association (DRA) to be created during the Amazon File Cache resource creation. The DRA links the cache to either an Amazon S3 bucket or prefix, or a Network File System (NFS) data repository that supports the NFSv3 protocol. The DRA does not support automatic import or automatic export. A message describing any failures that occurred. A structure providing details of any failures that occurred. Per unit storage throughput represents the megabytes per second of read or write throughput per 1 tebibyte of storage provisioned. Cache throughput capacity is equal to Storage capacity (TiB) * PerUnitStorageThroughput (MB/s/TiB). The only supported value is The deployment type of the Amazon File Cache resource, which must be You use the The configuration for a Lustre MDT (Metadata Target) storage volume. The configuration for Lustre logging used to write the enabled logging events for your Amazon File Cache resource to Amazon CloudWatch Logs. The configuration for the Amazon File Cache resource. The storage capacity of the Lustre MDT (Metadata Target) storage volume in gibibytes (GiB). The only supported value is The configuration for a Lustre MDT (Metadata Target) storage volume. 
The metadata on Amazon File Cache is managed by a Lustre Metadata Server (MDS) while the actual metadata is persisted on an MDT. The version of the NFS (Network File System) protocol of the NFS data repository. The only supported value is A list of up to 2 IP addresses of DNS servers used to resolve the NFS file system domain name. The provided IP addresses can either be the IP addresses of a DNS forwarder or resolver that the customer manages and runs inside the customer VPC, or the IP addresses of the on-premises DNS servers. The configuration for an NFS data repository association (DRA) created during the creation of the Amazon File Cache resource. No caches were found based upon supplied parameters. The data repository events that are logged by Amazon FSx. The data repository events that are logged by Amazon FSx. Note that Amazon File Cache uses a default setting of The Amazon Resource Name (ARN) that specifies the destination of the logs. The destination can be any Amazon CloudWatch Logs log group ARN. The destination ARN must be in the same Amazon Web Services partition, Amazon Web Services Region, and Amazon Web Services account as your Amazon FSx file system. The configuration for Lustre logging used to write the enabled logging events for your file system to Amazon CloudWatch Logs. When logging is enabled, Lustre logs error and warning events from data repository operations such as automatic export and data repository tasks. To learn more about Lustre logging, see Logging with Amazon CloudWatch Logs. The configuration for Lustre logging used to write the enabled logging events for your Amazon FSx for Lustre file system or Amazon File Cache resource to Amazon CloudWatch Logs. The Amazon Resource Name (ARN) that specifies the destination of the logs. The destination can be any Amazon CloudWatch Logs log group ARN, with the following requirements: The destination ARN that you provide must be in the same Amazon Web Services partition, Amazon Web Services Region, and Amazon Web Services account as your Amazon FSx file system. The name of the Amazon CloudWatch Logs log group must begin with the If you do not provide a destination, Amazon FSx will create and use a log stream in the CloudWatch Logs If If The Amazon Resource Name (ARN) that specifies the destination of the logs. The destination can be any Amazon CloudWatch Logs log group ARN, with the following requirements: The destination ARN that you provide must be in the same Amazon Web Services partition, Amazon Web Services Region, and Amazon Web Services account as your Amazon FSx file system. The name of the Amazon CloudWatch Logs log group must begin with the If you do not provide a destination, Amazon FSx will create and use a log stream in the CloudWatch Logs If If The Lustre logging configuration used when creating or updating an Amazon FSx for Lustre file system. Lustre logging writes the enabled logging events for your file system to Amazon CloudWatch Logs. Error and warning events can be logged from the following data repository operations: Automatic export Data repository tasks To learn more about Lustre logging, see Logging to Amazon CloudWatch Logs. The Lustre logging configuration used when creating or updating an Amazon FSx for Lustre file system. An Amazon File Cache is created with Lustre logging enabled by default, with a setting of Lustre logging writes the enabled logging events for your file system or cache to Amazon CloudWatch Logs. A cache configuration is required for this operation. 
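Read as SDK calls, the data repository association shape above corresponds to a create call roughly like the following sketch. This is a minimal sketch, not the release's own example: it assumes the synchronous FSxClient from the fsx module and camelCase builder methods mirroring the CreateDataRepositoryAssociation request members; the file system ID, path, and bucket are hypothetical.

import software.amazon.awssdk.services.fsx.FSxClient;
import software.amazon.awssdk.services.fsx.model.CreateDataRepositoryAssociationRequest;
import software.amazon.awssdk.services.fsx.model.CreateDataRepositoryAssociationResponse;

public class DataRepositoryAssociationSketch {
    public static void main(String[] args) {
        try (FSxClient fsx = FSxClient.create()) {
            // Link a directory on the Lustre file system to an S3 prefix. The
            // directory can be linked to only one bucket, and setting
            // batchImportMetaDataOnCreate runs a metadata import task once the
            // association has been created.
            CreateDataRepositoryAssociationResponse response =
                fsx.createDataRepositoryAssociation(CreateDataRepositoryAssociationRequest.builder()
                    .fileSystemId("fs-0123456789abcdef0")                  // hypothetical ID
                    .fileSystemPath("/exported-files")                     // hypothetical path
                    .dataRepositoryPath("s3://amzn-s3-demo-bucket/files/") // hypothetical bucket
                    .batchImportMetaDataOnCreate(true)
                    .build());
            System.out.println(response.association().associationId());
        }
    }
}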
Further shapes cover the NFS data repository configuration (the NFSv3 protocol version and up to 2 IP addresses of DNS servers used to resolve the NFS file system domain name, which can be DNS forwarders or resolvers running inside the customer VPC or on-premises DNS servers, and which are not supported as a parameter for Amazon File Cache), the S3 auto export configuration, security group and subnet wording (you can specify only one subnet ID in a call to create a cache), and the UpdateFileCache request and response. A separate application block documents an operation that returns a URL to access the job run dashboard and an auto-stop configuration whose idle timeout defaults to 15 minutes.
The rest of this patch introduces the new AWS Migration Hub Orchestrator service: operations to create, get, update, and delete migration workflows, workflow steps, and step groups; to get and list templates, template steps, and template step groups; to list plugins and resource tags; to tag and untag resources; and to start, stop, and retry workflows (a typical call is shown in the sketch below). You must pause a running workflow in the Migration Hub Orchestrator console to delete it, pause the workflow to delete a running step, and run and update the status of a manual step for the workflow to continue after the step completes. The shapes describe workflow inputs and outputs (integer, string, list-of-string, and map values), per-operating-system commands and script locations (Linux and Windows), counts of migrated and failed servers, plugin summaries (ID, host name, status, IP address, version, registration time), and pagination fields, along with the standard access-denied, internal-server, resource-not-found, throttling, and validation exceptions. This API reference provides descriptions, syntax, and other details about each of the actions and data types for AWS Migration Hub Orchestrator. The topic for each action shows the API request parameters and the response. Alternatively, you can use one of the AWS SDKs to access an API that is tailored to the programming language or platform that you're using.
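Because Migration Hub Orchestrator is introduced by this patch, the client below is an assumption based on the SDK's usual code generation conventions rather than a published example: a MigrationHubOrchestratorClient class, a listWorkflows operation, and a response exposing migrationWorkflowSummary entries. A minimal listing sketch:

import software.amazon.awssdk.services.migrationhuborchestrator.MigrationHubOrchestratorClient;
import software.amazon.awssdk.services.migrationhuborchestrator.model.ListWorkflowsRequest;

public class ListWorkflowsSketch {
    public static void main(String[] args) {
        try (MigrationHubOrchestratorClient orchestrator = MigrationHubOrchestratorClient.create()) {
            // Page through the migration workflows in the account; maxResults and
            // nextToken follow the usual SDK pagination pattern described above.
            orchestrator.listWorkflows(ListWorkflowsRequest.builder().maxResults(10).build())
                        .migrationWorkflowSummary()
                        .forEach(w -> System.out.println(w.id() + " " + w.statusAsString()));
        }
    }
}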
The SageMaker update adds warm pool retention to training jobs. The reconstructed documentation strings:
"documentation":"The KeepAlivePeriodInSeconds value specified in the ResourceConfig to update."
"documentation":"The ResourceConfig to update KeepAlivePeriodInSeconds. Other fields in the ResourceConfig cannot be updated."
"documentation":"The ResourceConfig to update warm pool retention length."
+ },
+ "ResourceRetainedBillableTimeInSeconds":{
+ "shape":"ResourceRetainedBillableTimeInSeconds",
+ "documentation":"InUse
: The warm pool is in use for the training job.Available
: The warm pool is available to reuse for a matching training job.Reused
: The warm pool moved to a matching training job for reuse.Terminated
: The warm pool is no longer available. Warm pools are unavailable if they are terminated by a user, terminated for a patch update, or terminated for exceeding the specified KeepAlivePeriodInSeconds
.ResourceRetainedBillableTimeInSeconds
by the number of instances (InstanceCount
) in your training cluster to get the total compute time SageMaker bills you if you run warm pool training. The formula is as follows: ResourceRetainedBillableTimeInSeconds * InstanceCount
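The billable-time formula above is a plain multiplication; a tiny sanity-check sketch with made-up numbers, kept in Java to match the SDK:

public class WarmPoolBilling {
    public static void main(String[] args) {
        // Hypothetical values: 4 instances kept alive for 30 minutes after the job.
        long resourceRetainedBillableTimeInSeconds = 1_800L;
        int instanceCount = 4;

        // Total warm pool compute time billed, per the formula
        // ResourceRetainedBillableTimeInSeconds * InstanceCount.
        long totalBilledSeconds = resourceRetainedBillableTimeInSeconds * instanceCount;
        System.out.println(totalBilledSeconds); // 7200
    }
}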
Secrets Manager documentation updates. CancelRotateSecret: if you cancel a rotation while it is in progress, it can leave the VersionStage labels in an unexpected state. You might need to remove the staging label AWSPENDING from the partially created version. You also need to determine whether to roll back to the previous version of the secret by moving the staging label AWSCURRENT to the version that has AWSPENDING. To determine which version has a specific staging label, call ListSecretVersionIds. Then use UpdateSecretVersionStage to change staging labels. For more information, see How rotation works. Required permissions: secretsmanager:CancelRotateSecret. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
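The cleanup that the CancelRotateSecret description walks through (find the version holding AWSPENDING, then adjust staging labels) maps onto two calls with the v2 SecretsManagerClient. A minimal sketch; the secret name MyTestSecret is hypothetical, and rolling AWSCURRENT back is left out since it depends on which version you decide to keep:

import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import software.amazon.awssdk.services.secretsmanager.model.ListSecretVersionIdsRequest;
import software.amazon.awssdk.services.secretsmanager.model.SecretVersionsListEntry;
import software.amazon.awssdk.services.secretsmanager.model.UpdateSecretVersionStageRequest;

public class CancelRotationCleanup {
    public static void main(String[] args) {
        try (SecretsManagerClient sm = SecretsManagerClient.create()) {
            // Find the version that was left holding the AWSPENDING label.
            SecretVersionsListEntry pending = sm.listSecretVersionIds(
                    ListSecretVersionIdsRequest.builder().secretId("MyTestSecret").build())
                .versions().stream()
                .filter(v -> v.versionStages().contains("AWSPENDING"))
                .findFirst()
                .orElseThrow(() -> new IllegalStateException("no AWSPENDING version"));

            // Detach AWSPENDING from the partially created version.
            sm.updateSecretVersionStage(UpdateSecretVersionStageRequest.builder()
                .secretId("MyTestSecret")
                .versionStage("AWSPENDING")
                .removeFromVersionId(pending.versionId())
                .build());
        }
    }
}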
CreateSecret: include either the SecretString parameter or the SecretBinary parameter, but not both. If you include SecretString or SecretBinary then Secrets Manager creates an initial secret version and automatically attaches the staging label AWSCURRENT to it. For database credentials, make sure the SecretString matches the JSON structure of a database secret. If you don't specify a KMS encryption key, Secrets Manager uses the Amazon Web Services managed key aws/secretsmanager. If this key doesn't already exist in your account, then Secrets Manager creates it for you automatically. All users and roles in the Amazon Web Services account automatically have access to use aws/secretsmanager. Creating aws/secretsmanager can result in a one-time significant delay in returning the result. If the secret is in a different account from the caller, you can't use aws/secretsmanager to encrypt the secret, and you must create and use a customer managed KMS key. The updated description adds: do not include sensitive information in SecretBinary or SecretString because it might be logged; for more information, see Logging Secrets Manager events with CloudTrail. Required permissions: secretsmanager:CreateSecret, plus secretsmanager:TagResource if you include tags in the secret. To encrypt the secret with a KMS key other than aws/secretsmanager, you need kms:GenerateDataKey and kms:Decrypt permission to the key.
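A minimal CreateSecret sketch following those rules: exactly one of SecretString or SecretBinary, and no kmsKeyId, so the aws/secretsmanager managed key applies. The name and value are placeholders:

import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import software.amazon.awssdk.services.secretsmanager.model.CreateSecretRequest;
import software.amazon.awssdk.services.secretsmanager.model.CreateSecretResponse;

public class CreateSecretSketch {
    public static void main(String[] args) {
        try (SecretsManagerClient sm = SecretsManagerClient.create()) {
            // SecretString and SecretBinary are mutually exclusive; providing one
            // creates the initial version and attaches the AWSCURRENT label to it.
            CreateSecretResponse created = sm.createSecret(CreateSecretRequest.builder()
                .name("MyTestSecret") // hypothetical name
                .secretString("{\"username\":\"admin\",\"password\":\"EXAMPLE-PASSWORD\"}")
                .build());
            System.out.println(created.arn());
        }
    }
}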
DeleteResourcePolicy: requires secretsmanager:DeleteResourcePolicy. DeleteSecret: Secrets Manager attaches a DeletionDate stamp to the secret that specifies the end of the recovery window; at the end of the recovery window, Secrets Manager deletes the secret permanently. Before then, you can remove the DeletionDate and cancel the deletion of the secret. Requires secretsmanager:DeleteSecret. DescribeSecret: requires secretsmanager:DescribeSecret. GetRandomPassword: requires secretsmanager:GetRandomPassword. GetResourcePolicy: requires secretsmanager:GetResourcePolicy. For more information on each, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
GetSecretValue: retrieves SecretString or SecretBinary from the specified version of a secret, whichever contains content. To retrieve the previous version of a secret, use VersionStage and specify AWSPREVIOUS; to revert to the previous version of a secret, call UpdateSecretVersionStage. Required permissions: secretsmanager:GetSecretValue. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key aws/secretsmanager, then you also need kms:Decrypt permissions for that key. ListSecretVersionIds: requires secretsmanager:ListSecretVersionIds. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
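Retrieving the previous version as described above is a single call; a sketch assuming the same hypothetical secret name:

import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest;
import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse;

public class GetPreviousSecretValue {
    public static void main(String[] args) {
        try (SecretsManagerClient sm = SecretsManagerClient.create()) {
            // Omitting versionId/versionStage would return the AWSCURRENT version;
            // asking for AWSPREVIOUS retrieves the last version, as described above.
            GetSecretValueResponse previous = sm.getSecretValue(GetSecretValueRequest.builder()
                .secretId("MyTestSecret") // hypothetical name
                .versionStage("AWSPREVIOUS")
                .build());
            System.out.println(previous.secretString());
        }
    }
}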
ListSecrets: to retrieve the secret value from SecretString or SecretBinary, call GetSecretValue. Requires secretsmanager:ListSecrets. PutResourcePolicy: requires secretsmanager:PutResourcePolicy. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
PutSecretValue: creates a new version with a new encrypted SecretString value or a new SecretBinary value. We recommend you avoid calling PutSecretValue at a sustained rate of more than once every 10 minutes. When you update the secret value, Secrets Manager creates a new version of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not remove versions created less than 24 hours ago. If you call PutSecretValue more than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach the quota for secret versions. You can specify the staging labels to attach to the new version in VersionStages. If you don't include VersionStages, then Secrets Manager automatically moves the staging label AWSCURRENT to this version. If this operation creates the first version for the secret, then Secrets Manager automatically attaches the staging label AWSCURRENT to it. If this operation moves the staging label AWSCURRENT from another version to this version, then Secrets Manager also automatically moves the staging label AWSPREVIOUS to the version that AWSCURRENT was removed from. If you call this operation with a ClientRequestToken that matches an existing version's VersionId, and you specify the same secret data, the operation succeeds but does nothing. However, if the secret data is different, then the operation fails because you can't modify an existing version; you can only create new ones. The updated description adds: do not include sensitive information in SecretBinary or SecretString because it might be logged; for more information, see Logging Secrets Manager events with CloudTrail. Required permissions: secretsmanager:PutSecretValue. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
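A PutSecretValue sketch that relies on the default label movement described above: no VersionStages are supplied, so Secrets Manager moves AWSCURRENT to the new version and AWSPREVIOUS to the version it came from. Name and value are placeholders:

import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import software.amazon.awssdk.services.secretsmanager.model.PutSecretValueRequest;
import software.amazon.awssdk.services.secretsmanager.model.PutSecretValueResponse;

public class PutSecretValueSketch {
    public static void main(String[] args) {
        try (SecretsManagerClient sm = SecretsManagerClient.create()) {
            // No versionStages are passed, so the AWSCURRENT label moves to this
            // new version automatically.
            PutSecretValueResponse put = sm.putSecretValue(PutSecretValueRequest.builder()
                .secretId("MyTestSecret") // hypothetical name
                .secretString("{\"username\":\"admin\",\"password\":\"ROTATED-EXAMPLE\"}")
                .build());
            System.out.println(put.versionId());
        }
    }
}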
RemoveRegionsFromReplication: requires secretsmanager:RemoveRegionsFromReplication. ReplicateSecretToRegions: requires secretsmanager:ReplicateSecretToRegions. RestoreSecret: removes the DeletedDate time stamp, and you can access a secret again after it has been restored; requires secretsmanager:RestoreSecret. RotateSecret: when rotation succeeds, the new version gets the staging label AWSCURRENT, and then anyone who retrieves the secret gets the new version (for more information, see How rotation works). When rotation is unsuccessful, the AWSPENDING staging label might be attached to the same version as the AWSCURRENT version, or it might not be attached to any version. If the AWSPENDING staging label is present but not attached to the same version as AWSCURRENT, then any later invocation of RotateSecret assumes that a previous rotation request is still in progress and returns an error. Requires secretsmanager:RotateSecret, and you also need lambda:InvokeFunction permissions on the rotation function (see Permissions for rotation). StopReplicationToReplica: requires secretsmanager:StopReplicationToReplica. TagResource: do not use the aws: prefix in your tag names or values because Amazon Web Services reserves it for Amazon Web Services use. You can't edit or delete tag names or values with this prefix, and tags with this prefix do not count against your tags per secret limit. Requires secretsmanager:TagResource. UntagResource: requires secretsmanager:UntagResource. For more information on each, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
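Tagging a secret under those constraints might look like the following sketch; the key and value are arbitrary and deliberately avoid the reserved aws: prefix:

import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import software.amazon.awssdk.services.secretsmanager.model.Tag;
import software.amazon.awssdk.services.secretsmanager.model.TagResourceRequest;

public class TagSecretSketch {
    public static void main(String[] args) {
        try (SecretsManagerClient sm = SecretsManagerClient.create()) {
            // Tag keys must not use the reserved "aws:" prefix described above.
            sm.tagResource(TagResourceRequest.builder()
                .secretId("MyTestSecret") // hypothetical name
                .tags(Tag.builder().key("environment").value("test").build())
                .build());
        }
    }
}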
UpdateSecret: we recommend you avoid calling UpdateSecret at a sustained rate of more than once every 10 minutes. When you call UpdateSecret to update the secret value, Secrets Manager creates a new version of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not remove versions created less than 24 hours ago. If you update the secret value more than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach the quota for secret versions. If you include SecretString or SecretBinary to create a new secret version, Secrets Manager automatically moves the staging label AWSCURRENT to the new version; the updated description adds that it then attaches the label AWSPREVIOUS to the version that AWSCURRENT was removed from. If you call this operation with a ClientRequestToken that matches an existing version's VersionId, the operation results in an error. You can't modify an existing version, you can only create a new version; to remove a version, remove all staging labels from it (see UpdateSecretVersionStage). Do not include sensitive information in SecretBinary or SecretString because it might be logged (see Logging Secrets Manager events with CloudTrail). Required permissions: secretsmanager:UpdateSecret. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager. If you use a customer managed key, you must also have kms:GenerateDataKey and kms:Decrypt permissions on the key. For more information, see Secret encryption and decryption.
UpdateSecretVersionStage: the staging labels in the VersionStage parameter are added to the existing list of staging labels for the version. You can move the AWSCURRENT staging label to a version by including it in this call; whenever you move AWSCURRENT, Secrets Manager automatically moves the label AWSPREVIOUS to the version that AWSCURRENT was removed from. Required permissions: secretsmanager:UpdateSecretVersionStage. ValidateResourcePolicy: requires secretsmanager:ValidateResourcePolicy. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
The Proton documentation updates change guide references and deployment wording. CancelEnvironmentDeployment:
- "documentation":"... You can only cancel a deployment that is in progress (IN_PROGRESS deployment status). For more information, see Update an environment in the Proton Administrator guide."
+ "documentation":"... If the cancellation attempt succeeds, the resulting deployment state is CANCELLED. If the cancellation attempt fails, the resulting deployment state is FAILED. If the current deployment action succeeds before the cancellation attempt starts, the resulting deployment state is SUCCEEDED and the cancellation attempt has no effect. You can only cancel a deployment that is in progress (IN_PROGRESS deployment status). For more information, see Update an environment in the Proton User guide."
},
"CancelServiceInstanceDeployment":{
"name":"CancelServiceInstanceDeployment",
@@ -84,7 +84,7 @@
{"shape":"ResourceNotFoundException"},
{"shape":"InternalServerException"}
],
- "documentation":"CANCELLED
.FAILED
.SUCCEEDED
and the cancellation attempt has no effect.IN_PROGRESS
. For more information, see Update a service instance in the Proton Administrator guide or the Proton User guide.
"
+ "documentation":"CANCELLED
.FAILED
.SUCCEEDED
and the cancellation attempt has no effect.IN_PROGRESS
. For more information, see Update a service instance in the Proton User guide.
"
},
"CancelServicePipelineDeployment":{
"name":"CancelServicePipelineDeployment",
@@ -102,7 +102,7 @@
{"shape":"ResourceNotFoundException"},
{"shape":"InternalServerException"}
],
- "documentation":"CANCELLED
.FAILED
.SUCCEEDED
and the cancellation attempt has no effect.IN_PROGRESS
. For more information, see Update a service pipeline in the Proton Administrator guide or the Proton User guide.
"
+ "documentation":"CANCELLED
.FAILED
.SUCCEEDED
and the cancellation attempt has no effect.IN_PROGRESS
. For more information, see Update a service pipeline in the Proton User guide.
"
},
"CreateComponent":{
"name":"CreateComponent",
@@ -121,7 +121,7 @@
{"shape":"ResourceNotFoundException"},
{"shape":"InternalServerException"}
],
- "documentation":"CANCELLED
.FAILED
.SUCCEEDED
and the cancellation attempt has no effect.
",
+ "documentation":"provisioning
parameter and set the value to CUSTOMER_MANAGED
. For more information, see Register and publish an environment template in the Proton Administrator Guide.
",
"idempotent":true
},
"CreateEnvironmentTemplateVersion":{
@@ -218,7 +218,7 @@
{"shape":"ConflictException"},
{"shape":"InternalServerException"}
],
- "documentation":"provisioning
parameter and set the value to CUSTOMER_MANAGED
. For more information, see Register and publish an environment template in the Proton User Guide.deploymentType
field defines the mode.IN_PROGRESS
.deploymentType
field defines the mode.IN_PROGRESS
.protonServiceRoleArn
and provisioningRepository
parameter to update or connect to an environment account connection.environmentAccountConnectionId
parameter. You can't update or connect the environment to an environment account connection if it isn't already associated with an environment connection.environmentAccountConnectionId
or protonServiceRoleArn
parameter and value. You can’t update both.provisioningRepository
parameter.provisioningRepository
parameter and omit the protonServiceRoleArn
and environmentAccountConnectionId
parameters.deploymentType
field defines the mode.
"
+ "documentation":"NONE
CURRENT_VERSION
deployment-type
.MINOR_VERSION
MAJOR_VERSION
protonServiceRoleArn
and provisioningRepository
parameter to update or connect to an environment account connection.environmentAccountConnectionId
parameter. You can't update or connect the environment to an environment account connection if it isn't already associated with an environment connection.environmentAccountConnectionId
or protonServiceRoleArn
parameter and value. You can’t update both.provisioningRepository
parameter.provisioningRepository
parameter and omit the protonServiceRoleArn
and environmentAccountConnectionId
parameters.deploymentType
field defines the mode.
"
},
"UpdateEnvironmentAccountConnection":{
"name":"UpdateEnvironmentAccountConnection",
@@ -1207,7 +1207,7 @@
{"shape":"ResourceNotFoundException"},
{"shape":"InternalServerException"}
],
- "documentation":"NONE
CURRENT_VERSION
deployment-type
.MINOR_VERSION
MAJOR_VERSION
description
parameter to modify the description.spec
parameter to add or delete instances.description
parameter to modify the description.spec
parameter to add or delete instances.deploymentType
field defines the mode.IN_PROGRESS
.deploymentType
field defines the mode.IN_PROGRESS
.templateName
and templateType
.templateName
and templateType
. Repository details (branch, name, and provider) should be of a linked repository. A linked repository is a repository that has been registered with Proton. For more information, see CreateRepository.serviceInstanceName
is associated with. Provided when a component is attached to a service instance.componentRoleArn
to allow directly defined components to be associated with any environments running in this account.componentRoleArn
to allow directly defined components to be associated with any environments running in this account.componentRoleArn
to allow directly defined components to be associated with this environment.componentRoleArn
to allow directly defined components to be associated with this environment.environmentAccountConnectionId
or protonServiceRoleArn
parameter and omit the provisioningRepository
parameter.environmentAccountConnectionId
or protonServiceRoleArn
parameter and omit the provisioningRepository
parameter.environmentAccountConnectionId
and protonServiceRoleArn
parameters.environmentAccountConnectionId
and protonServiceRoleArn
parameters.myrepos/myrepo
).myrepos/myrepo
).componentRoleArn
to allow directly defined components to be associated with the environment.componentRoleArn
to allow directly defined components to be associated with the environment.componentRoleArn
to allow directly defined components to be associated with any environments running in the account.componentRoleArn
to allow directly defined components to be associated with any environments running in the account.componentRoleArn
to allow directly defined components to be associated with any environments running in the account.componentRoleArn
to allow directly defined components to be associated with any environments running in the account.componentRoleArn
to allow directly defined components to be associated with the environment.componentRoleArn
to allow directly defined components to be associated with the environment.CLOUDFORMATION
can be used for Amazon Web Services-managed provisioning, and TERRAFORM
can be used for self-managed provisioning.CLOUDFORMATION
can be used for Amazon Web Services-managed provisioning, and TERRAFORM
can be used for self-managed provisioning.myrepos/myrepo
.myrepos/myrepo
).true
to remove a configured pipeline repository from the account settings. Don't set this field if you are updating the configured pipeline repository.deletePipelineProvisioningRepository
to true
, and don't set pipelineProvisioningRepository
.componentRoleArn
to allow directly defined components to be associated with any environments running in the account.componentRoleArn
to allow directly defined components to be associated with any environments running in the account.componentRoleArn
to allow directly defined components to be associated with the environment.componentRoleArn
to allow directly defined components to be associated with the environment.supportedComponentSources
doesn't impact existing component attachments to instances based on this template version. A change only affects later associations.supportedComponentSources
doesn't impact existing component attachments to instances based on this template version. A change only affects later associations.myrepos/myrepo
).myrepos/myrepo
).ValidationException
with an IdempotentParameterMismatch
error.
ConflictException
.ValidationException
with an IdempotentParameterMismatch
error.
DELETE_IN_PROGRESS
, the retry returns the resource detail data in the response without performing any further actions.
"
+ "documentation":"ValidationException
with an IdempotentParameterMismatch
error.
ConflictException
.ValidationException
with an IdempotentParameterMismatch
error.
DELETE_IN_PROGRESS
, the retry returns the resource detail data in the response without performing any further actions.
"
}
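The idempotency notes above follow a common pattern: a retry with identical parameters succeeds (or, for a delete already in DELETE_IN_PROGRESS, returns the current resource detail), while a retry that changes a parameter is rejected. A sketch of what that looks like to a caller, assuming the generated Proton exception types (the request values are hypothetical, and the exact exception raised on mismatch is per the documentation above, not verified here):

    import software.amazon.awssdk.services.proton.ProtonClient;
    import software.amazon.awssdk.services.proton.model.ConflictException;
    import software.amazon.awssdk.services.proton.model.CreateEnvironmentTemplateRequest;
    import software.amazon.awssdk.services.proton.model.ValidationException;

    ProtonClient proton = ProtonClient.create();
    CreateEnvironmentTemplateRequest request = CreateEnvironmentTemplateRequest.builder()
            .name("my-env-template")   // hypothetical template name
            .build();
    proton.createEnvironmentTemplate(request);   // initial call
    proton.createEnvironmentTemplate(request);   // identical retry is accepted idempotently
    try {
        // A retry that changes a parameter is not idempotent:
        proton.createEnvironmentTemplate(request.toBuilder().description("changed").build());
    } catch (ValidationException | ConflictException e) {
        // Expect an IdempotentParameterMismatch-style rejection, per the docs above.
    }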
From 97e532b79b656cdbcc566d2cb28c5517da3b41c9 Mon Sep 17 00:00:00 2001
From: AWS <>
Date: Thu, 29 Sep 2022 18:05:39 +0000
Subject: [PATCH 07/16] Amazon Translate Update: This release enables customers
to access control rights on Translate resources like Parallel Data and Custom
Terminology using Tag Based Authorization.
---
.../feature-AmazonTranslate-662a9e6.json | 6 +
.../codegen-resources/service-2.json | 147 +++++++++++++++++-
2 files changed, 150 insertions(+), 3 deletions(-)
create mode 100644 .changes/next-release/feature-AmazonTranslate-662a9e6.json
diff --git a/.changes/next-release/feature-AmazonTranslate-662a9e6.json b/.changes/next-release/feature-AmazonTranslate-662a9e6.json
new file mode 100644
index 000000000000..fcfb937133fb
--- /dev/null
+++ b/.changes/next-release/feature-AmazonTranslate-662a9e6.json
@@ -0,0 +1,6 @@
+{
+ "type": "feature",
+ "category": "Amazon Translate",
+ "contributor": "",
+ "description": "This release enables customers to access control rights on Translate resources like Parallel Data and Custom Terminology using Tag Based Authorization."
+}
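Since this Translate release enables tag-based authorization, resources such as custom terminologies and parallel data can now carry tags that IAM policies match on. A minimal tagging sketch with the SDK v2 Translate client, assuming the generated surface for the new TagResource operation (the ARN and tag values are hypothetical):

    import software.amazon.awssdk.services.translate.TranslateClient;
    import software.amazon.awssdk.services.translate.model.Tag;
    import software.amazon.awssdk.services.translate.model.TagResourceRequest;

    TranslateClient translate = TranslateClient.create();
    translate.tagResource(TagResourceRequest.builder()
            .resourceArn("arn:aws:translate:us-east-1:111122223333:terminology/my-terms")  // hypothetical ARN
            .tags(Tag.builder().key("team").value("localization").build())
            .build());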
diff --git a/services/translate/src/main/resources/codegen-resources/service-2.json b/services/translate/src/main/resources/codegen-resources/service-2.json
index b5ad55986bde..0f7630e6a5e5 100644
--- a/services/translate/src/main/resources/codegen-resources/service-2.json
+++ b/services/translate/src/main/resources/codegen-resources/service-2.json
@@ -26,7 +26,9 @@
{"shape":"InvalidRequestException"},
{"shape":"LimitExceededException"},
{"shape":"TooManyRequestsException"},
+ {"shape":"TooManyTagsException"},
{"shape":"ConflictException"},
+ {"shape":"ConcurrentModificationException"},
{"shape":"InternalServerException"}
],
"documentation":"IN_PROGRESS
, the job will be marked for termination and put into the STOP_REQUESTED
state. If the job completes before it can be stopped, it is put into the COMPLETED
state. Otherwise, the job is put into the STOPPED
state.JobId
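The STOP_REQUESTED/COMPLETED/STOPPED transitions above are driven by StopTextTranslationJob. A minimal sketch with the SDK v2 Translate client (the job ID is hypothetical):

    import software.amazon.awssdk.services.translate.TranslateClient;
    import software.amazon.awssdk.services.translate.model.StopTextTranslationJobRequest;

    TranslateClient translate = TranslateClient.create();
    translate.stopTextTranslationJob(StopTextTranslationJobRequest.builder()
            .jobId("1234567890abcdef0")   // hypothetical ID returned by StartTextTranslationJob
            .build());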
RequestCertificate action, there is a delay of several seconds before you can retrieve information about it. Not Before and Not After certificate fields. Issuer field must not be empty. CertificateArn argument. Include this argument only when you want to replace a previously imported certificate. fileb://. For example, you can specify a certificate saved in the C:\\temp folder as fileb://C:\\temp\\certificate_to_import.pem. If you are making an HTTP or HTTPS Query request, include these arguments as BLOBs. Not Before and Not After certificate fields. Issuer field must not be empty. CertificateArn argument. Include this argument only when you want to replace a previously imported certificate. fileb://. For example, you can specify a certificate saved in the C:\\temp folder as fileb://C:\\temp\\certificate_to_import.pem. If you are making an HTTP or HTTPS Query request, include these arguments as BLOBs. DomainName parameter. You can also specify additional FQDNs in the SubjectAlternativeNames parameter. DomainName parameter. You can also specify additional FQDNs in the SubjectAlternativeNames parameter. RequestCertificate action, there is a delay of several seconds before you can retrieve information about the new certificate. IMPORTED. IMPORTED. FAILED. For more information, see Certificate Request Failed in the Amazon Web Services Certificate Manager User Guide. FAILED. For more information, see Certificate Request Failed in the Certificate Manager User Guide. AMAZON_ISSUED. For certificates that you imported with ImportCertificate, this value is IMPORTED. ACM does not provide managed renewal for imported certificates. For more information about the differences between certificates that you import and those that ACM provides, see Importing Certificates in the Amazon Web Services Certificate Manager User Guide. AMAZON_ISSUED. For certificates that you imported with ImportCertificate, this value is IMPORTED. ACM does not provide managed renewal for imported certificates. For more information about the differences between certificates that you import and those that ACM provides, see Importing Certificates in the Certificate Manager User Guide. arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 AMAZON_ISSUED. For certificates that you imported with ImportCertificate, this value is IMPORTED. ACM does not provide managed renewal for imported certificates. For more information about the differences between certificates that you import and those that ACM provides, see Importing Certificates in the Certificate Manager User Guide. PRIVATE. AMAZON_ISSUED. IMPORTED. REVOKED. openssl rsa -in encrypted_key.pem -out decrypted_key.pem openssl rsa -in encrypted_key.pem -out decrypted_key.pem RSA_1024 and RSA_2048 certificates that have at least one domain. To return other certificate types, provide the desired type signatures in a comma-separated list. For example, \"keyTypes\": [\"RSA_2048,RSA_4096\"] returns both RSA_2048 and RSA_4096 certificates. RSA_1024 and RSA_2048 certificates that have at least one domain. To return other certificate types, provide the desired type signatures in a comma-separated list. For example, \"keyTypes\": [\"RSA_2048\",\"RSA_4096\"] returns both RSA_2048 and RSA_4096 certificates. NextToken element is sent in the response. Use this NextToken value in a subsequent request to retrieve additional items. SortBy, you must also specify SortOrder. SortOrder, you must also specify SortBy. arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012
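The keyTypes correction above (one string per key type, not one comma-joined string) maps to a list-valued filter in the SDK. A hedged sketch with the SDK v2 ACM client:

    import software.amazon.awssdk.services.acm.AcmClient;
    import software.amazon.awssdk.services.acm.model.Filters;
    import software.amazon.awssdk.services.acm.model.KeyAlgorithm;
    import software.amazon.awssdk.services.acm.model.ListCertificatesRequest;

    AcmClient acm = AcmClient.create();
    acm.listCertificates(ListCertificatesRequest.builder()
            .includes(Filters.builder()
                    // Each key type is a separate list entry, mirroring ["RSA_2048","RSA_4096"].
                    .keyTypes(KeyAlgorithm.RSA_2048, KeyAlgorithm.RSA_4096)
                    .build())
            .build());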
Persistent_2 deployment type. Persistent_2 deployment type. CreateDataRepositoryAssociation isn't supported on Amazon File Cache resources. To create a DRA on Amazon File Cache, use the CreateFileCache operation. CreateDataRepositoryTask operation will fail if a data repository is not linked to the FSx file system. To learn more about data repository tasks, see Data Repository Tasks. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket. CreateFileCache returns the description of the existing cache. If a cache with the specified client request token exists and the parameters don't match, this call returns IncompatibleParameterError. If a file cache with the specified client request token doesn't exist, CreateFileCache does the following: CREATING. CreateFileCache call returns while the cache's lifecycle state is still CREATING. You can check the cache creation status by calling the DescribeFileCaches operation, which returns the cache state along with other information. CreateFileSystem API operation: CreateFileSystem operation without the risk of creating an extra file system. This approach can be useful when an initial call fails in a way that makes it unclear whether a file system was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a file system, the client receives success as long as the parameters are the same. CreateFileSystem returns the description of the existing file system. If a file system with the specified client request token exists and the parameters don't match, this call returns IncompatibleParameterError. If a file system with the specified client request token doesn't exist, CreateFileSystem does the following: CREATING. CreateFileSystem operation without the risk of creating an extra file system. This approach can be useful when an initial call fails in a way that makes it unclear whether a file system was created. Examples are if a transport-level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a file system, the client receives a success message as long as the parameters are the same. CreateFileSystem call returns while the file system's lifecycle state is still CREATING. You can check the file-system creation status by calling the DescribeFileSystems operation, which returns the file system state along with other information. CreateFileSystem API operation: CreateFileSystem operation without the risk of creating an extra file system. This approach can be useful when an initial call fails in a way that makes it unclear whether a file system was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a file system, the client receives success as long as the parameters are the same. CreateFileSystem returns the description of the existing file system. If a file system with the specified client request token exists and the parameters don't match, this call returns IncompatibleParameterError. If a file system with the specified client request token doesn't exist, CreateFileSystem does the following: CREATING. CreateFileSystem call returns while the file system's lifecycle state is still CREATING. You can check the file-system creation status by calling the DescribeFileSystems operation, which returns the file system state along with other information. Persistent_2 deployment type. DeleteFileCache operation returns while the cache has the DELETING status. You can check the cache deletion status by calling the DescribeFileCaches operation, which returns a list of caches in your account. If you pass the cache ID for a deleted cache, the DescribeFileCaches operation returns a FileCacheNotFound error. AssociationIds values are provided in the request, or if filters are used in the request. Data repository associations are supported only for file systems with the Persistent_2 deployment type. file-system-id filter with the ID of the file system) or data repository associations for a specific repository type (use the data-repository-type filter with a value of S3). If you don't use filters, the response returns all data repository associations owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling. MaxResults parameter to limit the number of data repository associations returned in a response. If more data repository associations remain, Amazon FSx returns a NextToken value in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response. AssociationIds values are provided in the request, or if filters are used in the request. Data repository associations are supported only for Amazon FSx for Lustre file systems with the Persistent_2 deployment type and for Amazon File Cache resources. file-system-id filter with the ID of the file system) or caches (use the file-cache-id filter with the ID of the cache), or data repository associations for a specific repository type (use the data-repository-type filter with a value of S3 or NFS). If you don't use filters, the response returns all data repository associations owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling. MaxResults parameter to limit the number of data repository associations returned in a response. If more data repository associations remain, a NextToken value is returned in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response. TaskIds values are provided in the request, or if filters are used in the request. You can use filters to narrow the response to include just tasks for specific file systems, or tasks in a specific lifecycle state. Otherwise, it returns all data repository tasks owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling. MaxResults parameter to limit the number of tasks returned in a response. If more tasks remain, Amazon FSx returns a NextToken value in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response. TaskIds values are provided in the request, or if filters are used in the request. You can use filters to narrow the response to include just tasks for specific file systems or caches, or tasks in a specific lifecycle state. Otherwise, it returns all data repository tasks owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling. MaxResults parameter to limit the number of tasks returned in a response. If more tasks remain, a NextToken value is returned in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response. FileCacheIds value is provided for that cache. Otherwise, it returns descriptions of all caches owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling. MaxResults parameter to limit the number of descriptions in a response. If more cache descriptions remain, the operation returns a NextToken value in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response. DescribeFileCaches is called first without a NextToken value. Then the operation continues to be called with the NextToken parameter set to the value of the last NextToken value until a response has no NextToken.",
+ "idempotent":true
},
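The MaxResults/NextToken contract described above is the standard FSx pagination loop: pass the previous response's NextToken until it comes back null. A sketch against an operation that predates this patch (DescribeFileSystems), assuming the SDK v2 FSx client surface:

    import software.amazon.awssdk.services.fsx.FSxClient;
    import software.amazon.awssdk.services.fsx.model.DescribeFileSystemsRequest;
    import software.amazon.awssdk.services.fsx.model.DescribeFileSystemsResponse;

    FSxClient fsx = FSxClient.create();
    String nextToken = null;
    do {
        DescribeFileSystemsResponse page = fsx.describeFileSystems(DescribeFileSystemsRequest.builder()
                .maxResults(50)
                .nextToken(nextToken)   // null on the first request
                .build());
        page.fileSystems().forEach(fs -> System.out.println(fs.fileSystemId()));
        nextToken = page.nextToken();   // null once no results remain
    } while (nextToken != null);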
"DescribeFileSystemAliases":{
"name":"DescribeFileSystemAliases",
@@ -599,6 +653,26 @@
"documentation":"MaxResults
cache descriptions while still including a NextToken
value.DescribeFileCaches
call and the order of caches returned across the responses of a multicall iteration is unspecified.Persistent_2
deployment type.AutoExportPolicy
can have the following event values:
NEW
- Amazon FSx automatically exports new files and directories to the data repository as they are added to the file system.CHANGED
- Amazon FSx automatically exports changes to files and directories on the file system to the data repository.DELETED
- Files and directories are automatically deleted on the data repository when they are deleted on the file system.AutoExportPolicy
.AutoExportPolicy
can have the following event values:
NEW
- New files and directories are automatically exported to the data repository as they are added to the file system.CHANGED
- Changes to files and directories on the file system are automatically exported to the data repository.DELETED
- Files and directories are automatically deleted on the data repository when they are deleted on the file system.AutoExportPolicy
.AutoExportPolicy
defines the types of updated objects on the file system that will be automatically exported to the data repository. As you create, modify, or delete files, Amazon FSx automatically exports the defined changes asynchronously once your application finishes modifying the file.AutoExportPolicy
is supported only for file systems with the Persistent_2
deployment type.AutoExportPolicy
defines the types of updated objects on the file system that will be automatically exported to the data repository. As you create, modify, or delete files, Amazon FSx for Lustre automatically exports the defined changes asynchronously once your application finishes modifying the file.AutoExportPolicy
is supported only for Amazon FSx for Lustre file systems with the Persistent_2
deployment type.AutoImportPolicy
can have the following event values:
NEW
- Amazon FSx automatically imports metadata of files added to the linked S3 bucket that do not currently exist in the FSx file system.CHANGED
- Amazon FSx automatically updates file metadata and invalidates existing file content on the file system as files change in the data repository.DELETED
- Amazon FSx automatically deletes files on the file system as corresponding files are deleted in the data repository.AutoImportPolicy
.AutoImportPolicy
is supported only for file systems with the Persistent_2
deployment type.AutoImportPolicy
is supported only for Amazon FSx for Lustre file systems with the Persistent_2
deployment type./ns1/
) or subdirectory (such as /ns1/subdir/
) that will be mapped 1-1 with DataRepositoryPath
. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path /ns1/
, then you cannot link another data repository with file system path /ns1/ns2
./
) as the file system path, you can link only 1 data repository to the file system. You can only specify \"/\" as the file system path for the first data repository associated with a file system./ns1/
) or subdirectory (such as /ns1/subdir/
) that will be mapped 1-1 with DataRepositoryPath
. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path /ns1/
, then you cannot link another data repository with file system path /ns1/ns2
./
) as the file system path, you can link only one data repository to the file system. You can only specify \"/\" as the file system path for the first data repository associated with a file system.AUTO_RELEASE_DATA
task that automatically releases files from the cache.1000
.CACHE_1
.CreateFileCache
operation without the risk of creating an extra cache. This approach can be useful when an initial call fails in a way that makes it unclear whether a cache was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a cache, the client receives success as long as the parameters are the same.LUSTRE
.2.12
.KmsKeyId
isn't specified, the Amazon FSx-managed KMS key for your account is used. For more information, see Encrypt in the Key Management Service API Reference.
StorageCapacity
parameter, the default is the backup's StorageCapacity
value.StorageCapacity
value. If you provide any other value, Amazon FSx responds with a 400 Bad Request. CreateFileSystemFromBackup
operation.
"
+ "documentation":"CREATING
- The data repository association between the FSx file system and the S3 data repository is being created. The data repository is unavailable.AVAILABLE
- The data repository association is available for use.MISCONFIGURED
- Amazon FSx cannot automatically import updates from the S3 bucket or automatically export updates to the S3 bucket until the data repository association configuration is corrected.UPDATING
- The data repository association is undergoing a customer initiated update that might affect its availability.DELETING
- The data repository association is undergoing a customer initiated deletion.FAILED
- The data repository association is in a terminal state that cannot be recovered.
"
},
"FailureDetails":{"shape":"DataRepositoryFailureDetails"},
"FileSystemPath":{
"shape":"Namespace",
- "documentation":"CREATING
- The data repository association between the file system or cache and the data repository is being created. The data repository is unavailable.AVAILABLE
- The data repository association is available for use.MISCONFIGURED
- The data repository association is misconfigured. Until the configuration is corrected, automatic import and automatic export will not work (only for Amazon FSx for Lustre).UPDATING
- The data repository association is undergoing a customer initiated update that might affect its availability.DELETING
- The data repository association is undergoing a customer initiated deletion.FAILED
- The data repository association is in a terminal state that cannot be recovered./ns1/
) or subdirectory (such as /ns1/subdir/
) that will be mapped 1-1 with DataRepositoryPath
. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path /ns1/
, then you cannot link another data repository with file system path /ns1/ns2
./
) as the file system path, you can link only 1 data repository to the file system. You can only specify \"/\" as the file system path for the first data repository associated with a file system./ns1/
) or subdirectory (such as /ns1/subdir/
) that will be mapped 1-1 with DataRepositoryPath
. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path /ns1/
, then you cannot link another data repository with file system path /ns1/ns2
./
) as the file system path, you can link only one data repository to the file system. You can only specify \"/\" as the file system path for the first data repository associated with a file system.s3://myBucket/myPrefix/
. This path specifies where in the S3 data repository files will be imported from or exported to.
"
},
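The lifecycle values documented above (CREATING, AVAILABLE, MISCONFIGURED, UPDATING, DELETING, FAILED) can be observed by describing the association. A hedged sketch, assuming the SDK v2 FSx surface for DescribeDataRepositoryAssociations (the association ID is hypothetical):

    import software.amazon.awssdk.services.fsx.FSxClient;
    import software.amazon.awssdk.services.fsx.model.DescribeDataRepositoryAssociationsRequest;

    FSxClient fsx = FSxClient.create();
    fsx.describeDataRepositoryAssociations(DescribeDataRepositoryAssociationsRequest.builder()
            .associationIds("dra-0123456789abcdef0")   // hypothetical association ID
            .build())
       .associations()
       .forEach(a -> System.out.println(a.associationId() + " -> " + a.lifecycle()));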
"BatchImportMetaDataOnCreate":{
"shape":"BatchImportMetaDataOnCreate",
- "documentation":"
DataRepositorySubdirectories
parameter, the path is to an NFS Export directory (or one of its subdirectories) in the format nsf://nfs-domain-name/exportpath
. You can therefore link a single NFS Export to a single data repository association.DataRepositorySubdirectories
parameter, the path is the domain name of the NFS file system in the format nfs://filer-domain-name
, which indicates the root of the subdirectories specified with the DataRepositorySubdirectories
parameter.s3://myBucket/myPrefix/
.s3://myBucket/myPrefix/
.true
.true
.BatchImportMetaDataOnCreate
is not supported for data repositories linked to an Amazon File Cache resource./ns1/
) or subdirectory (such as /ns1/subdir/
) that will be mapped 1-1 with DataRepositoryPath
. The leading forward slash in the path is required. Two data repository associations cannot have overlapping cache paths. For example, if a data repository is associated with cache path /ns1/
, then you cannot link another data repository with cache path /ns1/ns2
.DataRepositorySubdirectories
is specified. If you specify root (/) as the cache path, you can create only one DRA on the cache./exportpath1
. To use this parameter, you must configure DataRepositoryPath
as the domain name of the NFS file system. The NFS file system domain name in effect is the root of the subdirectories. Note that DataRepositorySubdirectories
is not supported for S3 data repositories.
CreateDataRepositoryAssociation
UpdateDataRepositoryAssociation
DescribeDataRepositoryAssociations
Persistent_2
deployment type.
CreateDataRepositoryAssociation
UpdateDataRepositoryAssociation
DescribeDataRepositoryAssociations
Persistent_2
deployment type and for an Amazon File Cache resource.
PENDING
- Amazon FSx has not started the task.EXECUTING
- Amazon FSx is processing the task.FAILED
- Amazon FSx was not able to complete the task. For example, there may be files the task failed to process. The DataRepositoryTaskFailureDetails property provides more information about task failures.SUCCEEDED
- FSx completed the task successfully.CANCELED
- Amazon FSx canceled the task and it did not complete.CANCELING
- FSx is in process of canceling the task.PENDING
or EXECUTING
states. Please retry when the data repository task is finished (with a status of CANCELED
, SUCCEEDED
, or FAILED
). You can use the DescribeDataRepositoryTask action to monitor the task status. Contact the FSx team if you need to delete your file system immediately.
PENDING
- The task has not started.EXECUTING
- The task is in process.FAILED
- The task was not able to be completed. For example, there may be files the task failed to process. The DataRepositoryTaskFailureDetails property provides more information about task failures.SUCCEEDED
- The task has completed successfully.CANCELED
- The task was canceled and it did not complete.CANCELING
- The task is in process of being canceled.PENDING
or EXECUTING
states. Please retry when the data repository task is finished (with a status of CANCELED
, SUCCEEDED
, or FAILED
). You can use the DescribeDataRepositoryTask action to monitor the task status. Contact the FSx team if you need to delete your file system immediately.
"
+ "documentation":"EXPORT_TO_REPOSITORY
data repository task exports from your Lustre file system from to a linked S3 bucket.IMPORT_METADATA_FROM_REPOSITORY
data repository task imports metadata changes from a linked S3 bucket to your Lustre file system.
"
},
"CreationTime":{"shape":"CreationTime"},
"StartTime":{
"shape":"StartTime",
- "documentation":"EXPORT_TO_REPOSITORY
tasks export from your Amazon FSx for Lustre file system to a linked data repository.IMPORT_METADATA_FROM_REPOSITORY
tasks import metadata changes from a linked S3 bucket to your Amazon FSx for Lustre file system.AUTO_RELEASE_DATA
tasks automatically release files from an Amazon File Cache resource.Paths
is not specified, Amazon FSx uses the file system root directory.Paths
is not specified, Amazon FSx uses the file system root directory.DeleteFileCache
operation is successful, this status is DELETING
.DescribeFileCaches
operation.LUSTRE
.2.12
.
"
+ },
+ "FailureDetails":{
+ "shape":"FileCacheFailureDetails",
+ "documentation":"AVAILABLE
- The cache is in a healthy state, and is reachable and available for use.CREATING
- The new cache is being created.DELETING
- An existing cache is being deleted.UPDATING
- The cache is undergoing a customer-initiated update.FAILED
- An existing cache has experienced an unrecoverable failure. When creating a new cache, the cache was unable to be created.KmsKeyId
isn't specified, the Amazon FSx-managed KMS key for your account is used. For more information, see Encrypt in the Key Management Service API Reference.DescribeFileCaches
operation.LUSTRE
.2.12
.
"
+ },
+ "FailureDetails":{
+ "shape":"FileCacheFailureDetails",
+ "documentation":"AVAILABLE
- The cache is in a healthy state, and is reachable and available for use.CREATING
- The new cache is being created.DELETING
- An existing cache is being deleted.UPDATING
- The cache is undergoing a customer-initiated update.FAILED
- An existing cache has experienced an unrecoverable failure. When creating a new cache, the cache was unable to be created.KmsKeyId
isn't specified, the Amazon FSx-managed KMS key for your account is used. For more information, see Encrypt in the Key Management Service API Reference.CreateFileCache
operation./ns1/
) or subdirectory (such as /ns1/subdir/
) that will be mapped 1-1 with DataRepositoryPath
. The leading forward slash in the name is required. Two data repository associations cannot have overlapping cache paths. For example, if a data repository is associated with cache path /ns1/
, then you cannot link another data repository with cache path /ns1/ns2
.DataRepositorySubdirectories
is specified. If you specify root (/) as the cache path, you can create only one DRA on the cache.
"
+ },
+ "DataRepositorySubdirectories":{
+ "shape":"SubDirectoriesPaths",
+ "documentation":"
DataRepositorySubdirectories
parameter, the path is to an NFS Export directory (or one of its subdirectories) in the format nsf://nfs-domain-name/exportpath
. You can therefore link a single NFS Export to a single data repository association.DataRepositorySubdirectories
parameter, the path is the domain name of the NFS file system in the format nfs://filer-domain-name
, which indicates the root of the subdirectories specified with the DataRepositorySubdirectories
parameter.s3://myBucket/myPrefix/
./exportpath1
. To use this parameter, you must configure DataRepositoryPath
as the domain name of the NFS file system. The NFS file system domain name in effect is the root of the subdirectories. Note that DataRepositorySubdirectories
is not supported for S3 data repositories.1000
.CACHE_1
.MountName
value when mounting the cache. If you pass a cache ID to the DescribeFileCaches
operation, it returns the the MountName
value as part of the cache's description.2400
GiB.NFS3
, which indicates that the data repository must support the NFSv3 protocol.
"
+ "documentation":"WARN_ONLY
- only warning events are logged.ERROR_ONLY
- only error events are logged.WARN_ERROR
- both warning events and error events are logged.DISABLED
- logging of data repository events is turned off.
WARN_ONLY
- only warning events are logged.ERROR_ONLY
- only error events are logged.WARN_ERROR
- both warning events and error events are logged.DISABLED
- logging of data repository events is turned off.WARN_ERROR
, which can't be changed.
"
+ "documentation":"/aws/fsx
prefix./aws/fsx/lustre
log group.Destination
is provided and the resource does not exist, the request will fail with a BadRequest
error.Level
is set to DISABLED
, you cannot specify a destination in Destination
.
"
}
},
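The Level and Destination fields described above form the Lustre event-log configuration: Destination must be a CloudWatch Logs log group under the /aws/fsx prefix, and supplying one is invalid when Level is DISABLED. A hedged sketch, assuming the SDK v2 FSx model names for this shape (the log group ARN is hypothetical):

    import software.amazon.awssdk.services.fsx.model.LustreAccessAuditLogLevel;
    import software.amazon.awssdk.services.fsx.model.LustreLogCreateConfiguration;

    LustreLogCreateConfiguration logConfig = LustreLogCreateConfiguration.builder()
            .level(LustreAccessAuditLogLevel.WARN_ERROR)  // WARN_ONLY | ERROR_ONLY | WARN_ERROR | DISABLED
            .destination("arn:aws:logs:us-east-1:111122223333:log-group:/aws/fsx/lustre")  // hypothetical ARN
            .build();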
- "documentation":"/aws/fsx
prefix./aws/fsx/lustre
log group (for Amazon FSx for Lustre) or /aws/fsx/filecache
(for Amazon File Cache).Destination
is provided and the resource does not exist, the request will fail with a BadRequest
error.Level
is set to DISABLED
, you cannot specify a destination in Destination
.
WARN_ERROR
for the logging events. which can't be changed.NFS3
, which indicates that the data repository must support the NFSv3 protocol.AutoImportPolicy
that defines file events on the data repository are automatically imported to the file system and an AutoExportPolicy
that defines which file events on the file system are automatically exported to the data repository. File events are when files or directories are added, changed, or deleted on the file system or the data repository.AutoImportPolicy
that defines which file events on the data repository are automatically imported to the file system and an AutoExportPolicy
that defines which file events on the file system are automatically exported to the data repository. File events are when files or directories are added, changed, or deleted on the file system or the data repository.S3DataRepositoryConfiguration
because they don't support automatic import or automatic export.CreateFileSystem
operation.CreateFileCache
operation.