diff --git a/.changes/1.35.21.json b/.changes/1.35.21.json new file mode 100644 index 0000000000..53907cc573 --- /dev/null +++ b/.changes/1.35.21.json @@ -0,0 +1,32 @@ +[ + { + "category": "``codebuild``", + "description": "GitLab Enhancements - Add support for Self-Hosted GitLab runners in CodeBuild. Add group webhooks", + "type": "api-change" + }, + { + "category": "``ecr``", + "description": "The `DescribeImageScanning` API now includes `fixAvailable`, `exploitAvailable`, and `fixedInVersion` fields to provide more detailed information about the availability of fixes, exploits, and fixed versions for identified image vulnerabilities.", + "type": "api-change" + }, + { + "category": "``ecs``", + "description": "This is a documentation only release to address various tickets.", + "type": "api-change" + }, + { + "category": "``lambda``", + "description": "Support for JSON resource-based policies and block public access", + "type": "api-change" + }, + { + "category": "``rds``", + "description": "Updates Amazon RDS documentation with configuration information about the BYOL model for RDS for Db2.", + "type": "api-change" + }, + { + "category": "``ssm``", + "description": "Support for additional levels of cross-account, cross-Region organizational units in Automation. Various documentation updates.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/CHANGELOG.rst b/CHANGELOG.rst index c9d9527004..3fbe84a0c0 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,17 @@ CHANGELOG ========= +1.35.21 +======= + +* api-change:``codebuild``: GitLab Enhancements - Add support for Self-Hosted GitLab runners in CodeBuild. Add group webhooks +* api-change:``ecr``: The `DescribeImageScanning` API now includes `fixAvailable`, `exploitAvailable`, and `fixedInVersion` fields to provide more detailed information about the availability of fixes, exploits, and fixed versions for identified image vulnerabilities. +* api-change:``ecs``: This is a documentation only release to address various tickets. +* api-change:``lambda``: Support for JSON resource-based policies and block public access +* api-change:``rds``: Updates Amazon RDS documentation with configuration information about the BYOL model for RDS for Db2. +* api-change:``ssm``: Support for additional levels of cross-account, cross-Region organizational units in Automation. Various documentation updates. + + 1.35.20 ======= diff --git a/botocore/__init__.py b/botocore/__init__.py index d053516c27..71fa9d8af5 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.35.20' +__version__ = '1.35.21' class NullHandler(logging.Handler): diff --git a/botocore/data/codebuild/2016-10-06/service-2.json b/botocore/data/codebuild/2016-10-06/service-2.json index a7972dea35..c599b860e5 100644 --- a/botocore/data/codebuild/2016-10-06/service-2.json +++ b/botocore/data/codebuild/2016-10-06/service-2.json @@ -3941,15 +3941,15 @@ "members":{ "name":{ "shape":"String", - "documentation":"
The name of either the enterprise or organization that will send webhook events to CodeBuild, depending on if the webhook is a global or organization webhook respectively.
" + "documentation":"The name of either the group, enterprise, or organization that will send webhook events to CodeBuild, depending on the type of webhook.
" }, "domain":{ "shape":"String", - "documentation":"The domain of the GitHub Enterprise organization. Note that this parameter is only required if your project's source type is GITHUB_ENTERPRISE
" + "documentation":"The domain of the GitHub Enterprise organization or the GitLab Self Managed group. Note that this parameter is only required if your project's source type is GITHUB_ENTERPRISE or GITLAB_SELF_MANAGED.
" }, "scope":{ "shape":"WebhookScopeType", - "documentation":"The type of scope for a GitHub webhook.
" + "documentation":"The type of scope for a GitHub or GitLab webhook.
" } }, "documentation":"Contains configuration information about the scope for a webhook.
" @@ -4867,7 +4867,8 @@ "type":"string", "enum":[ "GITHUB_ORGANIZATION", - "GITHUB_GLOBAL" + "GITHUB_GLOBAL", + "GITLAB_GROUP" ] }, "WrapperBoolean":{"type":"boolean"}, diff --git a/botocore/data/ecr/2015-09-21/service-2.json b/botocore/data/ecr/2015-09-21/service-2.json index 9e35f8afd5..4c353da20b 100644 --- a/botocore/data/ecr/2015-09-21/service-2.json +++ b/botocore/data/ecr/2015-09-21/service-2.json @@ -1806,7 +1806,7 @@ "members":{ "encryptionType":{ "shape":"EncryptionType", - "documentation":"The encryption type to use.
If you use the KMS
encryption type, the contents of the repository will be encrypted using server-side encryption with Key Management Service key stored in KMS. When you use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key for Amazon ECR, or specify your own KMS key, which you already created.
If you use the KMS_DSSE
encryption type, the contents of the repository will be encrypted with two layers of encryption using server-side encryption with the KMS Management Service key stored in KMS. Similar to the KMS encryption type, you can either use the default Amazon Web Services managed KMS key for Amazon ECR, or specify your own KMS key, which you've already created.
If you use the AES256
encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES256 encryption algorithm. For more information, see Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3) in the Amazon Simple Storage Service Console Developer Guide.
The encryption type to use.
If you use the KMS
encryption type, the contents of the repository will be encrypted using server-side encryption with Key Management Service key stored in KMS. When you use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key for Amazon ECR, or specify your own KMS key, which you already created.
If you use the KMS_DSSE
encryption type, the contents of the repository will be encrypted with two layers of encryption using server-side encryption with the Key Management Service key stored in KMS. Similar to the KMS
encryption type, you can either use the default Amazon Web Services managed KMS key for Amazon ECR, or specify your own KMS key, which you've already created.
If you use the AES256
encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES256 encryption algorithm.
For more information, see Amazon ECR encryption at rest in the Amazon Elastic Container Registry User Guide.
" }, "kmsKey":{ "shape":"KmsKey", @@ -1900,6 +1900,14 @@ "updatedAt":{ "shape":"Date", "documentation":"The date and time the finding was last updated at.
" + }, + "fixAvailable":{ + "shape":"FixAvailable", + "documentation":"Details on whether a fix is available through a version update. This value can be YES
, NO
, or PARTIAL
. A PARTIAL
fix means that some, but not all, of the packages identified in the finding have fixes available through updated versions.
If a finding discovered in your environment has an exploit available.
" } }, "documentation":"The details of an enhanced image scan. This is returned when enhanced scanning is enabled for your private registry.
" @@ -1912,6 +1920,7 @@ "EvaluationTimestamp":{"type":"timestamp"}, "ExceptionMessage":{"type":"string"}, "ExpirationTimestamp":{"type":"timestamp"}, + "ExploitAvailable":{"type":"string"}, "FilePath":{"type":"string"}, "FindingArn":{"type":"string"}, "FindingDescription":{"type":"string"}, @@ -1932,6 +1941,8 @@ "key":{"shape":"FindingSeverity"}, "value":{"shape":"SeverityCount"} }, + "FixAvailable":{"type":"string"}, + "FixedInVersion":{"type":"string"}, "ForceFlag":{"type":"boolean"}, "GetAccountSettingRequest":{ "type":"structure", @@ -4466,6 +4477,10 @@ "version":{ "shape":"Version", "documentation":"The version of the vulnerable package.
" + }, + "fixedInVersion":{ + "shape":"FixedInVersion", + "documentation":"The version of the package that contains the vulnerability fix.
" } }, "documentation":"Information on the vulnerable package identified by a finding.
" diff --git a/botocore/data/ecs/2014-11-13/service-2.json b/botocore/data/ecs/2014-11-13/service-2.json index 90069bd5ef..f1a5b1a74a 100644 --- a/botocore/data/ecs/2014-11-13/service-2.json +++ b/botocore/data/ecs/2014-11-13/service-2.json @@ -1561,11 +1561,11 @@ "members":{ "name":{ "shape":"String", - "documentation":"The name of a container. If you're linking multiple containers together in a task definition, the name
of one container can be entered in the links
of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to name
in tthe docker conainer create command and the --name
option to docker run.
The name of a container. If you're linking multiple containers together in a task definition, the name
of one container can be entered in the links
of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to name
in the docker container create command and the --name
option to docker run.
The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either repository-url/image:tag
or repository-url/image@digest
. Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image
in the docker conainer create command and the IMAGE
parameter of docker run.
When a new task starts, the Amazon ECS container agent pulls the latest version of the specified image and tag for the container to use. However, subsequent updates to a repository image aren't propagated to already running tasks.
Images in Amazon ECR repositories can be specified by either using the full registry/repository:tag
or registry/repository@digest
. For example, 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>:latest
or 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE
.
Images in official repositories on Docker Hub use a single name (for example, ubuntu
or mongo
).
Images in other repositories on Docker Hub are qualified with an organization name (for example, amazon/amazon-ecs-agent
).
Images in other online repositories are qualified further by a domain name (for example, quay.io/assemblyline/ubuntu
).
The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either repository-url/image:tag
or repository-url/image@digest
. Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image
in the docker container create command and the IMAGE
parameter of docker run.
When a new task starts, the Amazon ECS container agent pulls the latest version of the specified image and tag for the container to use. However, subsequent updates to a repository image aren't propagated to already running tasks.
Images in Amazon ECR repositories can be specified by either using the full registry/repository:tag
or registry/repository@digest
. For example, 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>:latest
or 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE
.
Images in official repositories on Docker Hub use a single name (for example, ubuntu
or mongo
).
Images in other repositories on Docker Hub are qualified with an organization name (for example, amazon/amazon-ecs-agent
).
Images in other online repositories are qualified further by a domain name (for example, quay.io/assemblyline/ubuntu
).
The number of cpu
units reserved for the container. This parameter maps to CpuShares
in the docker conainer create commandand the --cpu-shares
option to docker run.
This field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level cpu
value.
You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page by 1,024.
Linux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that's the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task is guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the other container was not using it. If both tasks were 100% active all of the time, they would be limited to 512 CPU units.
On Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. The minimum valid CPU share value that the Linux kernel allows is 2, and the maximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you can use CPU values below 2 or above 262144 in your container definitions. For CPU values below 2 (including null) or above 262144, the behavior varies based on your Amazon ECS container agent version:
Agent versions less than or equal to 1.1.0: Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.
Agent versions greater than or equal to 1.2.0: Null, zero, and CPU values of 1 are passed to Docker as 2.
Agent versions greater than or equal to 1.84.0: CPU values greater than 256 vCPU are passed to Docker as 256, which is equivalent to 262144 CPU shares.
On Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that's described in the task definition. A null or zero CPU value is passed to Docker as 0
, which Windows interprets as 1% of one CPU.
The number of cpu
units reserved for the container. This parameter maps to CpuShares
in the docker container create command and the --cpu-shares
option to docker run.
This field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level cpu
value.
You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page by 1,024.
Linux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that's the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task is guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the other container was not using it. If both tasks were 100% active all of the time, they would be limited to 512 CPU units.
On Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. The minimum valid CPU share value that the Linux kernel allows is 2, and the maximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you can use CPU values below 2 or above 262144 in your container definitions. For CPU values below 2 (including null) or above 262144, the behavior varies based on your Amazon ECS container agent version:
Agent versions less than or equal to 1.1.0: Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.
Agent versions greater than or equal to 1.2.0: Null, zero, and CPU values of 1 are passed to Docker as 2.
Agent versions greater than or equal to 1.84.0: CPU values greater than 256 vCPU are passed to Docker as 256, which is equivalent to 262144 CPU shares.
On Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that's described in the task definition. A null or zero CPU value is passed to Docker as 0
, which Windows interprets as 1% of one CPU.
The amount (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. The total amount of memory reserved for all containers within a task must be lower than the task memory
value, if one is specified. This parameter maps to Memory
in thethe docker conainer create command and the --memory
option to docker run.
If using the Fargate launch type, this parameter is optional.
If using the EC2 launch type, you must specify either a task-level memory value or a container-level memory value. If you specify both a container-level memory
and memoryReservation
value, memory
must be greater than memoryReservation
. If you specify memoryReservation
, then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of memory
is used.
The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't specify less than 6 MiB of memory for your containers.
The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So, don't specify less than 4 MiB of memory for your containers.
" + "documentation":"The amount (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. The total amount of memory reserved for all containers within a task must be lower than the task memory
value, if one is specified. This parameter maps to Memory
in the docker container create command and the --memory
option to docker run.
If using the Fargate launch type, this parameter is optional.
If using the EC2 launch type, you must specify either a task-level memory value or a container-level memory value. If you specify both a container-level memory
and memoryReservation
value, memory
must be greater than memoryReservation
. If you specify memoryReservation
, then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of memory
is used.
The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't specify less than 6 MiB of memory for your containers.
The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So, don't specify less than 4 MiB of memory for your containers.
" }, "memoryReservation":{ "shape":"BoxedInteger", - "documentation":"The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the memory
parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to MemoryReservation
in the the docker conainer create command and the --memory-reservation
option to docker run.
If a task-level memory value is not specified, you must specify a non-zero integer for one or both of memory
or memoryReservation
in a container definition. If you specify both, memory
must be greater than memoryReservation
. If you specify memoryReservation
, then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of memory
is used.
For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a memoryReservation
of 128 MiB, and a memory
hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.
The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't specify less than 6 MiB of memory for your containers.
The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So, don't specify less than 4 MiB of memory for your containers.
" + "documentation":"The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the memory
parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to MemoryReservation
in the docker container create command and the --memory-reservation
option to docker run.
If a task-level memory value is not specified, you must specify a non-zero integer for one or both of memory
or memoryReservation
in a container definition. If you specify both, memory
must be greater than memoryReservation
. If you specify memoryReservation
, then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of memory
is used.
For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a memoryReservation
of 128 MiB, and a memory
hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.
The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't specify less than 6 MiB of memory for your containers.
The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So, don't specify less than 4 MiB of memory for your containers.
" }, "links":{ "shape":"StringList", - "documentation":"The links
parameter allows containers to communicate with each other without the need for port mappings. This parameter is only supported if the network mode of a task definition is bridge
. The name:internalName
construct is analogous to name:alias
in Docker links. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed.. This parameter maps to Links
in the docker conainer create command and the --link
option to docker run.
This parameter is not supported for Windows containers.
Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.
The links
parameter allows containers to communicate with each other without the need for port mappings. This parameter is only supported if the network mode of a task definition is bridge
. The name:internalName
construct is analogous to name:alias
in Docker links. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to Links
in the docker container create command and the --link
option to docker run.
This parameter is not supported for Windows containers.
Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.
The list of port mappings for the container. Port mappings allow containers to access ports on the host container instance to send or receive traffic.
For task definitions that use the awsvpc
network mode, only specify the containerPort
. The hostPort
can be left blank or it must be the same value as the containerPort
.
Port mappings on Windows use the NetNAT
gateway address rather than localhost
. There's no loopback for port mappings on Windows, so you can't access a container's mapped port from the host itself.
This parameter maps to PortBindings
in the the docker conainer create command and the --publish
option to docker run. If the network mode of a task definition is set to none
, then you can't specify port mappings. If the network mode of a task definition is set to host
, then host ports must either be undefined or they must match the container port in the port mapping.
After a task reaches the RUNNING
status, manual and automatic host and container port assignments are visible in the Network Bindings section of a container description for a selected task in the Amazon ECS console. The assignments are also visible in the networkBindings
section DescribeTasks responses.
The list of port mappings for the container. Port mappings allow containers to access ports on the host container instance to send or receive traffic.
For task definitions that use the awsvpc
network mode, only specify the containerPort
. The hostPort
can be left blank or it must be the same value as the containerPort
.
Port mappings on Windows use the NetNAT
gateway address rather than localhost
. There's no loopback for port mappings on Windows, so you can't access a container's mapped port from the host itself.
This parameter maps to PortBindings
in the docker container create command and the --publish
option to docker run. If the network mode of a task definition is set to none
, then you can't specify port mappings. If the network mode of a task definition is set to host
, then host ports must either be undefined or they must match the container port in the port mapping.
After a task reaches the RUNNING
status, manual and automatic host and container port assignments are visible in the Network Bindings section of a container description for a selected task in the Amazon ECS console. The assignments are also visible in the networkBindings
section DescribeTasks responses.
Early versions of the Amazon ECS container agent don't properly handle entryPoint
parameters. If you have problems using entryPoint
, update your container agent or enter your commands and arguments as command
array items instead.
The entry point that's passed to the container. This parameter maps to Entrypoint
in tthe docker conainer create command and the --entrypoint
option to docker run.
Early versions of the Amazon ECS container agent don't properly handle entryPoint
parameters. If you have problems using entryPoint
, update your container agent or enter your commands and arguments as command
array items instead.
The entry point that's passed to the container. This parameter maps to Entrypoint
in the docker container create command and the --entrypoint
option to docker run.
The command that's passed to the container. This parameter maps to Cmd
in the docker conainer create command and the COMMAND
parameter to docker run. If there are multiple arguments, each argument is a separated string in the array.
The command that's passed to the container. This parameter maps to Cmd
in the docker container create command and the COMMAND
parameter to docker run. If there are multiple arguments, each argument is a separated string in the array.
The environment variables to pass to a container. This parameter maps to Env
in the docker conainer create command and the --env
option to docker run.
We don't recommend that you use plaintext environment variables for sensitive information, such as credential data.
The environment variables to pass to a container. This parameter maps to Env
in the docker container create command and the --env
option to docker run.
We don't recommend that you use plaintext environment variables for sensitive information, such as credential data.
The mount points for data volumes in your container.
This parameter maps to Volumes
in the the docker conainer create command and the --volume
option to docker run.
Windows containers can mount whole directories on the same drive as $env:ProgramData
. Windows containers can't mount directories on a different drive, and mount point can't be across drives.
The mount points for data volumes in your container.
This parameter maps to Volumes
in the docker container create command and the --volume
option to docker run.
Windows containers can mount whole directories on the same drive as $env:ProgramData
. Windows containers can't mount directories on a different drive, and mount point can't be across drives.
Data volumes to mount from another container. This parameter maps to VolumesFrom
in tthe docker conainer create command and the --volumes-from
option to docker run.
Data volumes to mount from another container. This parameter maps to VolumesFrom
in the docker container create command and the --volumes-from
option to docker run.
Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own.
For tasks using the Fargate launch type, the task or service requires the following platforms:
Linux platform version 1.3.0
or later.
Windows platform version 1.0.0
or later.
The max stop timeout value is 120 seconds and if the parameter is not specified, the default value of 30 seconds is used.
For tasks that use the EC2 launch type, if the stopTimeout
parameter isn't specified, the value set for the Amazon ECS container agent configuration variable ECS_CONTAINER_STOP_TIMEOUT
is used. If neither the stopTimeout
parameter or the ECS_CONTAINER_STOP_TIMEOUT
agent configuration variable are set, then the default values of 30 seconds for Linux containers and 30 seconds on Windows containers are used. Your container instances require at least version 1.26.0 of the container agent to use a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init
package. If your container instances are launched from version 20190301
or later, then they contain the required versions of the container agent and ecs-init
. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
The valid values are 2-120 seconds.
" + "documentation":"Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own.
For tasks using the Fargate launch type, the task or service requires the following platforms:
Linux platform version 1.3.0
or later.
Windows platform version 1.0.0
or later.
For tasks that use the Fargate launch type, the max stop timeout value is 120 seconds and if the parameter is not specified, the default value of 30 seconds is used.
For tasks that use the EC2 launch type, if the stopTimeout
parameter isn't specified, the value set for the Amazon ECS container agent configuration variable ECS_CONTAINER_STOP_TIMEOUT
is used. If neither the stopTimeout
parameter or the ECS_CONTAINER_STOP_TIMEOUT
agent configuration variable are set, then the default values of 30 seconds for Linux containers and 30 seconds on Windows containers are used. Your container instances require at least version 1.26.0 of the container agent to use a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init
package. If your container instances are launched from version 20190301
or later, then they contain the required versions of the container agent and ecs-init
. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
The valid values for Fargate are 2-120 seconds.
" }, "hostname":{ "shape":"String", - "documentation":"The hostname to use for your container. This parameter maps to Hostname
in thethe docker conainer create command and the --hostname
option to docker run.
The hostname
parameter is not supported if you're using the awsvpc
network mode.
The hostname to use for your container. This parameter maps to Hostname
in the docker container create command and the --hostname
option to docker run.
The hostname
parameter is not supported if you're using the awsvpc
network mode.
The user to use inside the container. This parameter maps to User
in the docker conainer create command and the --user
option to docker run.
When running tasks using the host
network mode, don't run containers using the root user (UID 0). We recommend using a non-root user for better security.
You can specify the user
using the following formats. If specifying a UID or GID, you must specify it as a positive integer.
user
user:group
uid
uid:gid
user:gid
uid:group
This parameter is not supported for Windows containers.
The user to use inside the container. This parameter maps to User
in the docker container create command and the --user
option to docker run.
When running tasks using the host
network mode, don't run containers using the root user (UID 0). We recommend using a non-root user for better security.
You can specify the user
using the following formats. If specifying a UID or GID, you must specify it as a positive integer.
user
user:group
uid
uid:gid
user:gid
uid:group
This parameter is not supported for Windows containers.
The working directory to run commands inside the container in. This parameter maps to WorkingDir
in the docker conainer create command and the --workdir
option to docker run.
The working directory to run commands inside the container in. This parameter maps to WorkingDir
in the docker container create command and the --workdir
option to docker run.
When this parameter is true, networking is off within the container. This parameter maps to NetworkDisabled
in the docker conainer create command.
This parameter is not supported for Windows containers.
When this parameter is true, networking is off within the container. This parameter maps to NetworkDisabled
in the docker container create command.
This parameter is not supported for Windows containers.
When this parameter is true, the container is given elevated privileges on the host container instance (similar to the root
user). This parameter maps to Privileged
in the the docker conainer create command and the --privileged
option to docker run
This parameter is not supported for Windows containers or tasks run on Fargate.
When this parameter is true, the container is given elevated privileges on the host container instance (similar to the root
user). This parameter maps to Privileged
in the docker container create command and the --privileged
option to docker run
This parameter is not supported for Windows containers or tasks run on Fargate.
When this parameter is true, the container is given read-only access to its root file system. This parameter maps to ReadonlyRootfs
in the docker conainer create command and the --read-only
option to docker run.
This parameter is not supported for Windows containers.
When this parameter is true, the container is given read-only access to its root file system. This parameter maps to ReadonlyRootfs
in the docker container create command and the --read-only
option to docker run.
This parameter is not supported for Windows containers.
A list of DNS servers that are presented to the container. This parameter maps to Dns
in the the docker conainer create command and the --dns
option to docker run.
This parameter is not supported for Windows containers.
A list of DNS servers that are presented to the container. This parameter maps to Dns
in the docker container create command and the --dns
option to docker run.
This parameter is not supported for Windows containers.
A list of DNS search domains that are presented to the container. This parameter maps to DnsSearch
in the docker conainer create command and the --dns-search
option to docker run.
This parameter is not supported for Windows containers.
A list of DNS search domains that are presented to the container. This parameter maps to DnsSearch
in the docker container create command and the --dns-search
option to docker run.
This parameter is not supported for Windows containers.
A list of hostnames and IP address mappings to append to the /etc/hosts
file on the container. This parameter maps to ExtraHosts
in the docker conainer create command and the --add-host
option to docker run.
This parameter isn't supported for Windows containers or tasks that use the awsvpc
network mode.
A list of hostnames and IP address mappings to append to the /etc/hosts
file on the container. This parameter maps to ExtraHosts
in the docker container create command and the --add-host
option to docker run.
This parameter isn't supported for Windows containers or tasks that use the awsvpc
network mode.
A list of strings to provide custom configuration for multiple security systems. This field isn't valid for containers in tasks using the Fargate launch type.
For Linux tasks on EC2, this parameter can be used to reference custom labels for SELinux and AppArmor multi-level security systems.
For any tasks on EC2, this parameter can be used to reference a credential spec file that configures a container for Active Directory authentication. For more information, see Using gMSAs for Windows Containers and Using gMSAs for Linux Containers in the Amazon Elastic Container Service Developer Guide.
This parameter maps to SecurityOpt
in the docker conainer create command and the --security-opt
option to docker run.
The Amazon ECS container agent running on a container instance must register with the ECS_SELINUX_CAPABLE=true
or ECS_APPARMOR_CAPABLE=true
environment variables before containers placed on that instance can use these security options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.
Valid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" | \"credentialspec:CredentialSpecFilePath\"
" + "documentation":"A list of strings to provide custom configuration for multiple security systems. This field isn't valid for containers in tasks using the Fargate launch type.
For Linux tasks on EC2, this parameter can be used to reference custom labels for SELinux and AppArmor multi-level security systems.
For any tasks on EC2, this parameter can be used to reference a credential spec file that configures a container for Active Directory authentication. For more information, see Using gMSAs for Windows Containers and Using gMSAs for Linux Containers in the Amazon Elastic Container Service Developer Guide.
This parameter maps to SecurityOpt
in the docker container create command and the --security-opt
option to docker run.
The Amazon ECS container agent running on a container instance must register with the ECS_SELINUX_CAPABLE=true
or ECS_APPARMOR_CAPABLE=true
environment variables before containers placed on that instance can use these security options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.
Valid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" | \"credentialspec:CredentialSpecFilePath\"
" }, "interactive":{ "shape":"BoxedBoolean", - "documentation":"When this parameter is true
, you can deploy containerized applications that require stdin
or a tty
to be allocated. This parameter maps to OpenStdin
in the docker conainer create command and the --interactive
option to docker run.
When this parameter is true
, you can deploy containerized applications that require stdin
or a tty
to be allocated. This parameter maps to OpenStdin
in the docker container create command and the --interactive
option to docker run.
When this parameter is true
, a TTY is allocated. This parameter maps to Tty
in tthe docker conainer create command and the --tty
option to docker run.
When this parameter is true
, a TTY is allocated. This parameter maps to Tty
in the docker container create command and the --tty
option to docker run.
A key/value map of labels to add to the container. This parameter maps to Labels
in the docker conainer create command and the --label
option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
A key/value map of labels to add to the container. This parameter maps to Labels
in the docker container create command and the --label
option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
A list of ulimits
to set in the container. If a ulimit
value is specified in a task definition, it overrides the default values set by Docker. This parameter maps to Ulimits
in tthe docker conainer create command and the --ulimit
option to docker run. Valid naming values are displayed in the Ulimit data type.
Amazon ECS tasks hosted on Fargate use the default resource limit values set by the operating system with the exception of the nofile
resource limit parameter which Fargate overrides. The nofile
resource limit sets a restriction on the number of open files that a container can use. The default nofile
soft limit is 65535
and the default hard limit is 65535
.
This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
This parameter is not supported for Windows containers.
A list of ulimits
to set in the container. If a ulimit
value is specified in a task definition, it overrides the default values set by Docker. This parameter maps to Ulimits
in the docker container create command and the --ulimit
option to docker run. Valid naming values are displayed in the Ulimit data type.
Amazon ECS tasks hosted on Fargate use the default resource limit values set by the operating system with the exception of the nofile
resource limit parameter which Fargate overrides. The nofile
resource limit sets a restriction on the number of open files that a container can use. The default nofile
soft limit is 65535
and the default hard limit is 65535
.
This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
This parameter is not supported for Windows containers.
The log configuration specification for the container.
This parameter maps to LogConfig
in the docker conainer create command and the --log-driver
option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However the container can use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options).
Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers may be available in future releases of the Amazon ECS container agent.
This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS
environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.
The log configuration specification for the container.
This parameter maps to LogConfig
in the docker container create command and the --log-driver
option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However the container can use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options).
Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers may be available in future releases of the Amazon ECS container agent.
This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS
environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.
The container health check command and associated configuration parameters for the container. This parameter maps to HealthCheck
in the docker conainer create command and the HEALTHCHECK
parameter of docker run.
The container health check command and associated configuration parameters for the container. This parameter maps to HealthCheck
in the docker container create command and the HEALTHCHECK
parameter of docker run.
A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls
in tthe docker conainer create command and the --sysctl
option to docker run. For example, you can configure net.ipv4.tcp_keepalive_time
setting to maintain longer lived connections.
A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls
in the docker container create command and the --sysctl
option to docker run. For example, you can configure the net.ipv4.tcp_keepalive_time
setting to maintain longer lived connections.
If a service is using the rolling update (ECS
) deployment type, the maximumPercent
parameter represents an upper limit on the number of your service's tasks that are allowed in the RUNNING
or PENDING
state during a deployment, as a percentage of the desiredCount
(rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service is using the REPLICA
service scheduler and has a desiredCount
of four tasks and a maximumPercent
value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default maximumPercent
value for a service using the REPLICA
service scheduler is 200%.
If a service is using either the blue/green (CODE_DEPLOY
) or EXTERNAL
deployment types and tasks that use the EC2 launch type, the maximum percent value is set to the default value and is used to define the upper limit on the number of the tasks in the service that remain in the RUNNING
state while the container instances are in the DRAINING
state. If the tasks in the service use the Fargate launch type, the maximum percent value is not used, although it is returned when describing your service.
If a service is using the rolling update (ECS
) deployment type, the maximumPercent
parameter represents an upper limit on the number of your service's tasks that are allowed in the RUNNING
or PENDING
state during a deployment, as a percentage of the desiredCount
(rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service is using the REPLICA
service scheduler and has a desiredCount
of four tasks and a maximumPercent
value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default maximumPercent
value for a service using the REPLICA
service scheduler is 200%.
If a service is using either the blue/green (CODE_DEPLOY
) or EXTERNAL
deployment types, and tasks in the service use the EC2 launch type, the maximum percent value is set to the default value. The maximum percent value is used to define the upper limit on the number of the tasks in the service that remain in the RUNNING
state while the container instances are in the DRAINING
state.
You can't specify a custom maximumPercent
value for a service that uses either the blue/green (CODE_DEPLOY
) or EXTERNAL
deployment types and has tasks that use the EC2 launch type.
If the tasks in the service use the Fargate launch type, the maximum percent value is not used, although it is returned when describing your service.
" }, "minimumHealthyPercent":{ "shape":"BoxedInteger", - "documentation":"If a service is using the rolling update (ECS
) deployment type, the minimumHealthyPercent
represents a lower limit on the number of your service's tasks that must remain in the RUNNING
state during a deployment, as a percentage of the desiredCount
(rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a desiredCount
of four tasks and a minimumHealthyPercent
of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks.
For services that do not use a load balancer, the following should be noted:
A service is considered healthy if all essential containers within the tasks in the service pass their health checks.
If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a RUNNING
state before the task is counted towards the minimum healthy percent total.
If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is considered healthy when all essential containers within the task have passed their health checks. The amount of time the service scheduler can wait for is determined by the container health check settings.
For services that do use a load balancer, the following should be noted:
If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.
If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.
The default value for a replica service for minimumHealthyPercent
is 100%. The default minimumHealthyPercent
value for a service using the DAEMON
service schedule is 0% for the CLI, the Amazon Web Services SDKs, and the APIs and 50% for the Amazon Web Services Management Console.
The minimum number of healthy tasks during a deployment is the desiredCount
multiplied by the minimumHealthyPercent
/100, rounded up to the nearest integer value.
If a service is using either the blue/green (CODE_DEPLOY
) or EXTERNAL
deployment types and is running tasks that use the EC2 launch type, the minimum healthy percent value is set to the default value and is used to define the lower limit on the number of the tasks in the service that remain in the RUNNING
state while the container instances are in the DRAINING
state. If a service is using either the blue/green (CODE_DEPLOY
) or EXTERNAL
deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service.
If a service is using the rolling update (ECS
) deployment type, the minimumHealthyPercent
represents a lower limit on the number of your service's tasks that must remain in the RUNNING
state during a deployment, as a percentage of the desiredCount
(rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a desiredCount
of four tasks and a minimumHealthyPercent
of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks.
For services that do not use a load balancer, the following should be noted:
A service is considered healthy if all essential containers within the tasks in the service pass their health checks.
If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a RUNNING
state before the task is counted towards the minimum healthy percent total.
If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is considered healthy when all essential containers within the task have passed their health checks. The amount of time the service scheduler can wait for is determined by the container health check settings.
For services that do use a load balancer, the following should be noted:
If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.
If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.
The default value for a replica service for minimumHealthyPercent
is 100%. The default minimumHealthyPercent
value for a service using the DAEMON
service schedule is 0% for the CLI, the Amazon Web Services SDKs, and the APIs and 50% for the Amazon Web Services Management Console.
The minimum number of healthy tasks during a deployment is the desiredCount
multiplied by the minimumHealthyPercent
/100, rounded up to the nearest integer value.
If a service is using either the blue/green (CODE_DEPLOY
) or EXTERNAL
deployment types and is running tasks that use the EC2 launch type, the minimum healthy percent value is set to the default value. The minimum healthy percent value is used to define the lower limit on the number of the tasks in the service that remain in the RUNNING
state while the container instances are in the DRAINING
state.
You can't specify a custom minimumHealthyPercent
value for a service that uses either the blue/green (CODE_DEPLOY
) or EXTERNAL
deployment types and has tasks that use the EC2 launch type.
If a service is using either the blue/green (CODE_DEPLOY
) or EXTERNAL
deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service.
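As a concrete illustration of the rounding rule above, here is a minimal boto3 sketch (cluster, service, and task definition names are placeholders) that creates a rolling-update service with a 50% minimum healthy percent; with a desired count of four, at least ceil(4 x 50 / 100) = 2 tasks must stay RUNNING during a deployment.

```python
import boto3

ecs = boto3.client("ecs")

# Placeholder names; deploymentConfiguration applies to the rolling update (ECS) deployment type.
ecs.create_service(
    cluster="my-cluster",
    serviceName="my-service",
    taskDefinition="my-task-def:1",
    desiredCount=4,
    deploymentConfiguration={
        "minimumHealthyPercent": 50,  # at least ceil(4 * 50 / 100) = 2 tasks stay RUNNING
        "maximumPercent": 200,        # at most 8 tasks may run during the deployment
    },
)
```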
The Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement. If the driver was installed using the Docker plugin CLI, use docker plugin ls
to retrieve the driver name from your container instance. If the driver was installed using another method, use Docker plugin discovery to retrieve the driver name. This parameter maps to Driver
in the docker container create command and the --driver
option to docker volume create.
The Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement. If the driver was installed using the Docker plugin CLI, use docker plugin ls
to retrieve the driver name from your container instance. If the driver was installed using another method, use Docker plugin discovery to retrieve the driver name. This parameter maps to Driver
in the docker container create command and the --driver
option to docker volume create.
Custom metadata to add to your Docker volume. This parameter maps to Labels
in the docker container create command and the --label
option to docker volume create.
Custom metadata to add to your Docker volume. This parameter maps to Labels
in the docker container create command and the --label
option to docker volume create.
This parameter is specified when you're using Docker volumes. Docker volumes are only supported when you're using the EC2 launch type. Windows containers only support the use of the local
driver. To use bind mounts, specify a host
instead.
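A minimal boto3 sketch of a task definition that uses a Docker volume with the local driver on the EC2 launch type; the family, image, volume name, and label are placeholders.

```python
import boto3

ecs = boto3.client("ecs")

ecs.register_task_definition(
    family="docker-volume-example",           # placeholder family name
    requiresCompatibilities=["EC2"],          # Docker volumes require the EC2 launch type
    containerDefinitions=[{
        "name": "app",
        "image": "public.ecr.aws/docker/library/nginx:latest",
        "memory": 256,
        "mountPoints": [{"sourceVolume": "scratch", "containerPath": "/data"}],
    }],
    volumes=[{
        "name": "scratch",
        "dockerVolumeConfiguration": {
            "scope": "task",        # volume lifetime tied to the task
            "driver": "local",      # must match the driver name Docker reports
            "labels": {"team": "example"},
        },
    }],
)
```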
A string array representing the command that the container runs to determine if it is healthy. The string array must start with CMD
to run the command arguments directly, or CMD-SHELL
to run the command with the container's default shell.
When you use the Amazon Web Services Management Console JSON panel, the Command Line Interface, or the APIs, enclose the list of commands in double quotes and brackets.
[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]
You don't include the double quotes and brackets when you use the Amazon Web Services Management Console.
CMD-SHELL, curl -f http://localhost/ || exit 1
An exit code of 0 indicates success, and a non-zero exit code indicates failure. For more information, see HealthCheck
in the docker container create command.
A string array representing the command that the container runs to determine if it is healthy. The string array must start with CMD
to run the command arguments directly, or CMD-SHELL
to run the command with the container's default shell.
When you use the Amazon Web Services Management Console JSON panel, the Command Line Interface, or the APIs, enclose the list of commands in double quotes and brackets.
[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]
You don't include the double quotes and brackets when you use the Amazon Web Services Management Console.
CMD-SHELL, curl -f http://localhost/ || exit 1
An exit code of 0 indicates success, and a non-zero exit code indicates failure. For more information, see HealthCheck
in the docker container create command.
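For illustration, this is roughly how the health check described above is expressed in a container definition passed to register_task_definition; the image and the timing values are placeholders.

```python
# Container definition fragment for register_task_definition; endpoint and timings are illustrative.
health_check = {
    "command": ["CMD-SHELL", "curl -f http://localhost/ || exit 1"],  # exit 0 means healthy
    "interval": 30,     # seconds between checks
    "timeout": 5,       # seconds before a single check is considered failed
    "retries": 3,       # consecutive failures before the container is marked UNHEALTHY
    "startPeriod": 10,  # grace period after the container starts
}

container_definition = {
    "name": "web",
    "image": "public.ecr.aws/docker/library/nginx:latest",
    "essential": True,
    "healthCheck": health_check,
}
```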
The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to CapAdd
in the docker container create command and the --cap-add
option to docker run.
Tasks launched on Fargate only support adding the SYS_PTRACE
kernel capability.
Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"
The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to CapAdd
in the docker container create command and the --cap-add
option to docker run.
Tasks launched on Fargate only support adding the SYS_PTRACE
kernel capability.
Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"
The Linux capabilities for the container that have been removed from the default configuration provided by Docker. This parameter maps to CapDrop
in the docker container create command and the --cap-drop
option to docker run.
Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"
The Linux capabilities for the container that have been removed from the default configuration provided by Docker. This parameter maps to CapDrop
in the docker container create command and the --cap-drop
option to docker run.
Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"
The Linux capabilities to add or remove from the default Docker configuration for a container defined in the task definition. For more detailed information about these Linux capabilities, see the capabilities(7) Linux manual page.
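A small sketch of the corresponding linuxParameters fragment for a container definition; the chosen capabilities are examples only, drawn from the valid-values list above.

```python
# Container-level Linux parameters for register_task_definition.
linux_parameters = {
    "capabilities": {
        "add": ["SYS_PTRACE"],         # the only capability Fargate tasks may add
        "drop": ["NET_RAW", "MKNOD"],  # tighten the default Docker capability set
    }
}
```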
" @@ -3541,7 +3541,7 @@ }, "devices":{ "shape":"DevicesList", - "documentation":"Any host devices to expose to the container. This parameter maps to Devices
in the docker container create command and the --device
option to docker run.
If you're using tasks that use the Fargate launch type, the devices
parameter isn't supported.
Any host devices to expose to the container. This parameter maps to Devices
in the docker container create command and the --device
option to docker run.
If you're using tasks that use the Fargate launch type, the devices
parameter isn't supported.
The secrets to pass to the log configuration. For more information, see Specifying sensitive data in the Amazon Elastic Container Service Developer Guide.
" } }, - "documentation":"The log configuration for the container. This parameter maps to LogConfig
in the docker container create command and the --log-driver
option to docker run.
By default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition.
Understand the following when specifying a log configuration for your containers.
Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon. Additional log drivers may be available in future releases of the Amazon ECS container agent.
For tasks on Fargate, the supported log drivers are awslogs
, splunk
, and awsfirelens
.
For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs
, fluentd
, gelf
, json-file
, journald
, syslog
, splunk
, and awsfirelens
.
This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.
For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the ECS_AVAILABLE_LOGGING_DRIVERS
environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS container agent configuration in the Amazon Elastic Container Service Developer Guide.
For tasks that are on Fargate, because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, you could run Fluentd output aggregators or a remote host with Logstash to send Gelf logs to.
The log configuration for the container. This parameter maps to LogConfig
in the docker container create command and the --log-driver
option to docker run.
By default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition.
Understand the following when specifying a log configuration for your containers.
Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon. Additional log drivers may be available in future releases of the Amazon ECS container agent.
For tasks on Fargate, the supported log drivers are awslogs
, splunk
, and awsfirelens
.
For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs
, fluentd
, gelf
, json-file
, journald
, syslog
, splunk
, and awsfirelens
.
This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.
For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the ECS_AVAILABLE_LOGGING_DRIVERS
environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS container agent configuration in the Amazon Elastic Container Service Developer Guide.
For tasks that are on Fargate, because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, you could run Fluentd output aggregators or a remote host with Logstash to send Gelf logs to.
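For illustration, a minimal awslogs log configuration fragment for a container definition; the log group, Region, and stream prefix are placeholders.

```python
# logConfiguration fragment for register_task_definition; secretOptions could also be
# supplied here to pull sensitive log-driver options from Secrets Manager or SSM.
log_configuration = {
    "logDriver": "awslogs",
    "options": {
        "awslogs-group": "/ecs/my-app",        # placeholder log group
        "awslogs-region": "us-east-1",         # placeholder Region
        "awslogs-stream-prefix": "web",
    },
}
```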
The port number range on the container that's bound to the dynamically mapped host port range.
The following rules apply when you specify a containerPortRange
:
You must use either the bridge
network mode or the awsvpc
network mode.
This parameter is available for both the EC2 and Fargate launch types.
This parameter is available for both the Linux and Windows operating systems.
The container instance must have at least version 1.67.0 of the container agent and at least version 1.67.0-1 of the ecs-init
package.
You can specify a maximum of 100 port ranges per container.
You do not specify a hostPortRange
. The value of the hostPortRange
is set as follows:
For containers in a task with the awsvpc
network mode, the hostPortRange
is set to the same value as the containerPortRange
. This is a static mapping strategy.
For containers in a task with the bridge
network mode, the Amazon ECS agent finds open host ports from the default ephemeral range and passes them to Docker to bind to the container ports.
The containerPortRange
valid values are between 1 and 65535.
A port can only be included in one port mapping per container.
You cannot specify overlapping port ranges.
The first port in the range must be less than the last port in the range.
Docker recommends that you turn off the docker-proxy in the Docker daemon config file when you have a large number of ports.
For more information, see Issue #11185 on the GitHub website.
For information about how to turn off the docker-proxy in the Docker daemon config file, see Docker daemon in the Amazon ECS Developer Guide.
You can call DescribeTasks
to view the hostPortRange
values, which are the host ports that are bound to the container ports.
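A short boto3 sketch of a dynamic port range mapping with bridge networking, followed by reading the resulting host bindings from DescribeTasks; the cluster name and task ARN are placeholders.

```python
import boto3

ecs = boto3.client("ecs")

# Port mapping fragment: hostPortRange is intentionally omitted, so the agent assigns host ports.
port_mappings = [{"containerPortRange": "8000-8004", "protocol": "tcp"}]

# After the task reaches RUNNING, the assigned host ports appear under networkBindings.
tasks = ecs.describe_tasks(
    cluster="my-cluster",                               # placeholder cluster
    tasks=["arn:aws:ecs:us-east-1:123456789012:task/my-cluster/example"],  # placeholder ARN
)
for container in tasks["tasks"][0]["containers"]:
    print(container.get("networkBindings", []))
```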
Port mappings allow containers to access ports on the host container instance to send or receive traffic. Port mappings are specified as part of the container definition.
If you use containers in a task with the awsvpc
or host
network mode, specify the exposed ports using containerPort
. The hostPort
can be left blank or it must be the same value as the containerPort
.
Most fields of this parameter (containerPort
, hostPort
, protocol
) map to PortBindings
in the docker container create command and the --publish
option to docker run
. If the network mode of a task definition is set to host
, host ports must either be undefined or match the container port in the port mapping.
You can't expose the same container port for multiple protocols. If you attempt this, an error is returned.
After a task reaches the RUNNING
status, manual and automatic host and container port assignments are visible in the networkBindings
section of DescribeTasks API responses.
Port mappings allow containers to access ports on the host container instance to send or receive traffic. Port mappings are specified as part of the container definition.
If you use containers in a task with the awsvpc
or host
network mode, specify the exposed ports using containerPort
. The hostPort
can be left blank or it must be the same value as the containerPort
.
Most fields of this parameter (containerPort
, hostPort
, protocol
) map to PortBindings
in the docker container create command and the --publish
option to docker run
. If the network mode of a task definition is set to host
, host ports must either be undefined or match the container port in the port mapping.
You can't expose the same container port for multiple protocols. If you attempt this, an error is returned.
After a task reaches the RUNNING
status, manual and automatic host and container port assignments are visible in the networkBindings
section of DescribeTasks API responses.
The Amazon ECS account setting name to modify.
The following are the valid values for the account setting name.
serviceLongArnFormat
- When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.
taskLongArnFormat
- When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.
containerInstanceLongArnFormat
- When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.
awsvpcTrunking
- When modified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking
is turned on, any new container instances that support the feature are launched with the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide.
containerInsights
- When modified, the default setting indicating whether Amazon Web Services CloudWatch Container Insights is turned on for your clusters is changed. If containerInsights
is turned on, any new clusters that are created will have Container Insights turned on unless you disable it during cluster creation. For more information, see CloudWatch Container Insights in the Amazon Elastic Container Service Developer Guide.
dualStackIPv6
- When turned on, when using a VPC in dual stack mode, your tasks using the awsvpc
network mode can have an IPv6 address assigned. For more information on using IPv6 with tasks launched on Amazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using IPv6 with tasks launched on Fargate, see Using a VPC in dual-stack mode.
fargateFIPSMode
- If you specify fargateFIPSMode
, Fargate FIPS 140 compliance is affected.
fargateTaskRetirementWaitPeriod
- When Amazon Web Services determines that a security or infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod
to configure the wait time to retire a Fargate task. For information about the Fargate tasks maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer Guide.
tagResourceAuthorization
- Amazon ECS is introducing tagging authorization for resource creation. Users must have permissions for actions that create the resource, such as ecsCreateCluster
. If tags are specified when you create a resource, Amazon Web Services performs additional authorization to verify if users or roles have permissions to create tags. Therefore, you must grant explicit permissions to use the ecs:TagResource
action. For more information, see Grant permission to tag resources on creation in the Amazon ECS Developer Guide.
guardDutyActivate
- The guardDutyActivate
parameter is read-only in Amazon ECS and indicates whether Amazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your Amazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.
The Amazon ECS account setting name to modify.
The following are the valid values for the account setting name.
serviceLongArnFormat
- When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.
taskLongArnFormat
- When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.
containerInstanceLongArnFormat
- When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.
awsvpcTrunking
- When modified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking
is turned on, any new container instances that support the feature are launched with the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide.
containerInsights
- When modified, the default setting indicating whether Amazon Web Services CloudWatch Container Insights is turned on for your clusters is changed. If containerInsights
is turned on, any new clusters that are created will have Container Insights turned on unless you disable it during cluster creation. For more information, see CloudWatch Container Insights in the Amazon Elastic Container Service Developer Guide.
dualStackIPv6
- When turned on, when using a VPC in dual stack mode, your tasks using the awsvpc
network mode can have an IPv6 address assigned. For more information on using IPv6 with tasks launched on Amazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using IPv6 with tasks launched on Fargate, see Using a VPC in dual-stack mode.
fargateTaskRetirementWaitPeriod
- When Amazon Web Services determines that a security or infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod
to configure the wait time to retire a Fargate task. For information about the Fargate tasks maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer Guide.
tagResourceAuthorization
- Amazon ECS is introducing tagging authorization for resource creation. Users must have permissions for actions that create the resource, such as ecsCreateCluster
. If tags are specified when you create a resource, Amazon Web Services performs additional authorization to verify if users or roles have permissions to create tags. Therefore, you must grant explicit permissions to use the ecs:TagResource
action. For more information, see Grant permission to tag resources on creation in the Amazon ECS Developer Guide.
guardDutyActivate
- The guardDutyActivate
parameter is read-only in Amazon ECS and indicates whether Amazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your Amazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.
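For example, a minimal boto3 sketch that turns on the containerInsights account setting for the calling identity and then reads back the effective settings; no other values are assumed.

```python
import boto3

ecs = boto3.client("ecs")

# Turn on Container Insights for clusters created by the calling identity.
ecs.put_account_setting(name="containerInsights", value="enabled")

# Read back the effective settings for the account.
for setting in ecs.list_account_settings(effectiveSettings=True)["settings"]:
    print(setting["name"], setting["value"])
```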
The namespaced kernel parameter to set a value
for.
Valid IPC namespace values: \"kernel.msgmax\" | \"kernel.msgmnb\" | \"kernel.msgmni\" | \"kernel.sem\" | \"kernel.shmall\" | \"kernel.shmmax\" | \"kernel.shmmni\" | \"kernel.shm_rmid_forced\"
, and Sysctls
that start with \"fs.mqueue.*\"
Valid network namespace values: Sysctls
that start with \"net.*\"
All of these values are supported by Fargate.
" } }, - "documentation":"A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls
in the docker container create command and the --sysctl
option to docker run. For example, you can configure the net.ipv4.tcp_keepalive_time
setting to maintain longer lived connections.
We don't recommend that you specify network-related systemControls
parameters for multiple containers in a single task that also uses either the awsvpc
or host
network mode. Doing this has the following disadvantages:
For tasks that use the awsvpc
network mode including Fargate, if you set systemControls
for any container, it applies to all containers in the task. If you set different systemControls
for multiple containers in a single task, the container that's started last determines which systemControls
take effect.
For tasks that use the host
network mode, the network namespace systemControls
aren't supported.
If you're setting an IPC resource namespace to use for the containers in the task, the following conditions apply to your system controls. For more information, see IPC mode.
For tasks that use the host
IPC mode, IPC namespace systemControls
aren't supported.
For tasks that use the task
IPC mode, IPC namespace systemControls
values apply to all containers within a task.
This parameter is not supported for Windows containers.
This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0
or later (Linux). This isn't supported for Windows containers on Fargate.
A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls
in the docker container create command and the --sysctl
option to docker run. For example, you can configure the net.ipv4.tcp_keepalive_time
setting to maintain longer lived connections.
We don't recommend that you specify network-related systemControls
parameters for multiple containers in a single task that also uses either the awsvpc
or host
network mode. Doing this has the following disadvantages:
For tasks that use the awsvpc
network mode including Fargate, if you set systemControls
for any container, it applies to all containers in the task. If you set different systemControls
for multiple containers in a single task, the container that's started last determines which systemControls
take effect.
For tasks that use the host
network mode, the network namespace systemControls
aren't supported.
If you're setting an IPC resource namespace to use for the containers in the task, the following conditions apply to your system controls. For more information, see IPC mode.
For tasks that use the host
IPC mode, IPC namespace systemControls
aren't supported.
For tasks that use the task
IPC mode, IPC namespace systemControls
values apply to all containers within a task.
This parameter is not supported for Windows containers.
This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0
or later (Linux). This isn't supported for Windows containers on Fargate.
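A small illustrative systemControls fragment for a container definition; the parameter value is an example only.

```python
# systemControls fragment for register_task_definition; with the awsvpc network mode
# (including Fargate) this setting applies to every container in the task.
system_controls = [
    {"namespace": "net.ipv4.tcp_keepalive_time", "value": "300"},  # seconds, illustrative
]
```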
The task launch types the task definition validated against during task definition registration. For more information, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide.
" + "documentation":"Amazon ECS validates the task definition parameters with those supported by the launch type. For more information, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide.
" }, "runtimePlatform":{ "shape":"RuntimePlatform", @@ -6437,11 +6437,11 @@ }, "softLimit":{ "shape":"Integer", - "documentation":"The soft limit for the ulimit
type.
The soft limit for the ulimit
type. The value can be specified in bytes, seconds, or as a count, depending on the type
of the ulimit
.
The hard limit for the ulimit
type.
The hard limit for the ulimit
type. The value can be specified in bytes, seconds, or as a count, depending on the type
of the ulimit
.
The ulimit
settings to pass to the container.
Amazon ECS tasks hosted on Fargate use the default resource limit values set by the operating system with the exception of the nofile
resource limit parameter which Fargate overrides. The nofile
resource limit sets a restriction on the number of open files that a container can use. The default nofile
soft limit is 65535
and the default hard limit is 65535
.
You can specify the ulimit
settings for a container in a task definition.
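A small illustrative ulimits fragment for a container definition, reusing the default nofile values described above.

```python
# ulimits fragment for register_task_definition; values mirror the Fargate nofile defaults.
ulimits = [
    {"name": "nofile", "softLimit": 65535, "hardLimit": 65535},
]
```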
Deletes the provisioned concurrency configuration for a function.
" }, + "DeleteResourcePolicy":{ + "name":"DeleteResourcePolicy", + "http":{ + "method":"DELETE", + "requestUri":"/2024-09-16/resource-policy/{ResourceArn}", + "responseCode":204 + }, + "input":{"shape":"DeleteResourcePolicyRequest"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceConflictException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"PreconditionFailedException"} + ], + "documentation":"Deletes a resource-based policy from a function.
" + }, "GetAccountSettings":{ "name":"GetAccountSettings", "http":{ @@ -581,6 +599,40 @@ ], "documentation":"Retrieves the provisioned concurrency configuration for a function's alias or version.
" }, + "GetPublicAccessBlockConfig":{ + "name":"GetPublicAccessBlockConfig", + "http":{ + "method":"GET", + "requestUri":"/2024-09-16/public-access-block/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"GetPublicAccessBlockConfigRequest"}, + "output":{"shape":"GetPublicAccessBlockConfigResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InvalidParameterValueException"} + ], + "documentation":"Retrieve the public-access settings for a function.
" + }, + "GetResourcePolicy":{ + "name":"GetResourcePolicy", + "http":{ + "method":"GET", + "requestUri":"/2024-09-16/resource-policy/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"GetResourcePolicyRequest"}, + "output":{"shape":"GetResourcePolicyResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InvalidParameterValueException"} + ], + "documentation":"Retrieves the resource-based policy attached to a function.
" + }, "GetRuntimeManagementConfig":{ "name":"GetRuntimeManagementConfig", "http":{ @@ -1028,6 +1080,45 @@ ], "documentation":"Adds a provisioned concurrency configuration to a function's alias or version.
" }, + "PutPublicAccessBlockConfig":{ + "name":"PutPublicAccessBlockConfig", + "http":{ + "method":"PUT", + "requestUri":"/2024-09-16/public-access-block/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"PutPublicAccessBlockConfigRequest"}, + "output":{"shape":"PutPublicAccessBlockConfigResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceConflictException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"Configure your function's public-access settings.
To control public access to a Lambda function, you can choose whether to allow the creation of resource-based policies that allow public access to that function. You can also block public access to a function, even if it has an existing resource-based policy that allows it.
" + }, + "PutResourcePolicy":{ + "name":"PutResourcePolicy", + "http":{ + "method":"PUT", + "requestUri":"/2024-09-16/resource-policy/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"PutResourcePolicyRequest"}, + "output":{"shape":"PutResourcePolicyResponse"}, + "errors":[ + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceConflictException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"PolicyLengthExceededException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"PreconditionFailedException"}, + {"shape":"PublicPolicyException"} + ], + "documentation":"Adds a resource-based policy to a function. You can use resource-based policies to grant access to other Amazon Web Services accounts, organizations, or services. Resource-based policies apply to a single function, version, or alias.
Adding a resource-based policy using this API action replaces any existing policy you've previously created. This means that if you've previously added resource-based permissions to a function using the AddPermission action, those permissions will be overwritten by your new policy.
The Amazon Resource Name (ARN) of the function you want to delete the policy from. You can use either a qualified or an unqualified ARN, but the value you specify must be a complete ARN and wildcard characters are not accepted.
", + "location":"uri", + "locationName":"ResourceArn" + }, + "RevisionId":{ + "shape":"RevisionId", + "documentation":"Delete the existing policy only if its revision ID matches the string you specify. To find the revision ID of the policy currently attached to your function, use the GetResourcePolicy action.
", + "location":"querystring", + "locationName":"RevisionId" + } + } + }, "Description":{ "type":"string", "max":256, @@ -3466,6 +3575,52 @@ } } }, + "GetPublicAccessBlockConfigRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"PublicAccessBlockResourceArn", + "documentation":"The Amazon Resource Name (ARN) of the function you want to retrieve public-access settings for.
", + "location":"uri", + "locationName":"ResourceArn" + } + } + }, + "GetPublicAccessBlockConfigResponse":{ + "type":"structure", + "members":{ + "PublicAccessBlockConfig":{ + "shape":"PublicAccessBlockConfig", + "documentation":"The public-access settings configured for the function you specified
" + } + } + }, + "GetResourcePolicyRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"PolicyResourceArn", + "documentation":"The Amazon Resource Name (ARN) of the function you want to retrieve the policy for. You can use either a qualified or an unqualified ARN, but the value you specify must be a complete ARN and wildcard characters are not accepted.
", + "location":"uri", + "locationName":"ResourceArn" + } + } + }, + "GetResourcePolicyResponse":{ + "type":"structure", + "members":{ + "Policy":{ + "shape":"ResourcePolicy", + "documentation":"The resource-based policy attached to the function you specified.
" + }, + "RevisionId":{ + "shape":"RevisionId", + "documentation":"The revision ID of the policy.
" + } + } + }, "GetRuntimeManagementConfigRequest":{ "type":"structure", "required":["FunctionName"], @@ -4789,6 +4944,11 @@ "error":{"httpStatusCode":400}, "exception":true }, + "PolicyResourceArn":{ + "type":"string", + "max":256, + "pattern":"arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}((-gov)|(-iso([a-z]?)))?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_])+)?" + }, "PositiveInteger":{ "type":"integer", "min":1 @@ -4875,6 +5035,38 @@ "FAILED" ] }, + "PublicAccessBlockConfig":{ + "type":"structure", + "members":{ + "BlockPublicPolicy":{ + "shape":"NullableBoolean", + "documentation":"To block the creation of resource-based policies that would grant public access to your function, set BlockPublicPolicy
to true
. To allow the creation of resource-based policies that would grant public access to your function, set BlockPublicPolicy
to false
.
To block public access to your function, even if its resource-based policy allows it, set RestrictPublicResource
to true
. To allow public access to a function with a resource-based policy that permits it, set RestrictPublicResource
to false
.
An object that defines the public-access settings for a function.
" + }, + "PublicAccessBlockResourceArn":{ + "type":"string", + "max":170, + "pattern":"arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}((-gov)|(-iso([a-z]?)))?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_]+" + }, + "PublicPolicyException":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"String", + "documentation":"The exception type.
" + }, + "Message":{"shape":"String"} + }, + "documentation":"Lambda prevented your policy from being created because it would grant public access to your function. If you intended to create a public policy, use the PutPublicAccessBlockConfig API action to configure your function's public-access settings to allow public policies.
", + "error":{"httpStatusCode":400}, + "exception":true + }, "PublishLayerVersionRequest":{ "type":"structure", "required":[ @@ -5143,6 +5335,70 @@ } } }, + "PutPublicAccessBlockConfigRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "PublicAccessBlockConfig" + ], + "members":{ + "ResourceArn":{ + "shape":"PublicAccessBlockResourceArn", + "documentation":"The Amazon Resource Name (ARN) of the function you want to configure public-access settings for. Public-access settings are applied at the function level, so you can't apply different settings to function versions or aliases.
", + "location":"uri", + "locationName":"ResourceArn" + }, + "PublicAccessBlockConfig":{ + "shape":"PublicAccessBlockConfig", + "documentation":"An object defining the public-access settings you want to apply.
To block the creation of resource-based policies that would grant public access to your function, set BlockPublicPolicy
to true
. To allow the creation of resource-based policies that would grant public access to your function, set BlockPublicPolicy
to false
.
To block public access to your function, even if its resource-based policy allows it, set RestrictPublicResource
to true
. To allow public access to a function with a resource-based policy that permits it, set RestrictPublicResource
to false
.
The default setting for both BlockPublicPolicy
and RestrictPublicResource
is true
.
The public-access settings Lambda applied to your function.
" + } + } + }, + "PutResourcePolicyRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Policy" + ], + "members":{ + "ResourceArn":{ + "shape":"PolicyResourceArn", + "documentation":"The Amazon Resource Name (ARN) of the function you want to add the policy to. You can use either a qualified or an unqualified ARN, but the value you specify must be a complete ARN and wildcard characters are not accepted.
", + "location":"uri", + "locationName":"ResourceArn" + }, + "Policy":{ + "shape":"ResourcePolicy", + "documentation":"The JSON resource-based policy you want to add to your function.
To learn more about creating resource-based policies for controlling access to Lambda, see Working with resource-based IAM policies in Lambda in the Lambda Developer Guide.
" + }, + "RevisionId":{ + "shape":"RevisionId", + "documentation":"Replace the existing policy only if its revision ID matches the string you specify. To find the revision ID of the policy currently attached to your function, use the GetResourcePolicy action.
" + } + } + }, + "PutResourcePolicyResponse":{ + "type":"structure", + "members":{ + "Policy":{ + "shape":"ResourcePolicy", + "documentation":"The policy Lambda added to your function.
" + }, + "RevisionId":{ + "shape":"RevisionId", + "documentation":"The revision ID of the policy Lambda added to your function.
" + } + } + }, "PutRuntimeManagementConfigRequest":{ "type":"structure", "required":[ @@ -5371,6 +5627,12 @@ "error":{"httpStatusCode":502}, "exception":true }, + "ResourcePolicy":{ + "type":"string", + "max":20480, + "min":1, + "pattern":"[\\s\\S]+" + }, "ResponseStreamingInvocationType":{ "type":"string", "enum":[ @@ -5378,6 +5640,12 @@ "DryRun" ] }, + "RevisionId":{ + "type":"string", + "max":36, + "min":36, + "pattern":"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" + }, "RoleArn":{ "type":"string", "pattern":"arn:(aws[a-zA-Z-]*)?:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+" diff --git a/botocore/data/rds/2014-10-31/service-2.json b/botocore/data/rds/2014-10-31/service-2.json index a3ae53f176..8a726e1b3f 100644 --- a/botocore/data/rds/2014-10-31/service-2.json +++ b/botocore/data/rds/2014-10-31/service-2.json @@ -4483,7 +4483,7 @@ }, "LicenseModel":{ "shape":"String", - "documentation":"The license model information for this DB instance.
License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see RDS for Db2 licensing options in the Amazon RDS User Guide.
The default for RDS for Db2 is bring-your-own-license
.
This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.
Valid Values:
RDS for Db2 - bring-your-own-license | marketplace-license
RDS for MariaDB - general-public-license
RDS for Microsoft SQL Server - license-included
RDS for MySQL - general-public-license
RDS for Oracle - bring-your-own-license | license-included
RDS for PostgreSQL - postgresql-license
The license model information for this DB instance.
License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group and an Amazon Web Services License Manager self-managed license. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see Amazon RDS for Db2 licensing options in the Amazon RDS User Guide.
The default for RDS for Db2 is bring-your-own-license
.
This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.
Valid Values:
RDS for Db2 - bring-your-own-license | marketplace-license
RDS for MariaDB - general-public-license
RDS for Microsoft SQL Server - license-included
RDS for MySQL - general-public-license
RDS for Oracle - bring-your-own-license | license-included
RDS for PostgreSQL - postgresql-license
License model information for the restored DB instance.
License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see RDS for Db2 licensing options in the Amazon RDS User Guide.
This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.
Valid Values:
RDS for Db2 - bring-your-own-license | marketplace-license
RDS for MariaDB - general-public-license
RDS for Microsoft SQL Server - license-included
RDS for MySQL - general-public-license
RDS for Oracle - bring-your-own-license | license-included
RDS for PostgreSQL - postgresql-license
Default: Same as the source.
" + "documentation":"License model information for the restored DB instance.
License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group and an Amazon Web Services License Manager self-managed license. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see Amazon RDS for Db2 licensing options in the Amazon RDS User Guide.
This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.
Valid Values:
RDS for Db2 - bring-your-own-license | marketplace-license
RDS for MariaDB - general-public-license
RDS for Microsoft SQL Server - license-included
RDS for MySQL - general-public-license
RDS for Oracle - bring-your-own-license | license-included
RDS for PostgreSQL - postgresql-license
Default: Same as the source.
" }, "DBName":{ "shape":"String", @@ -15644,7 +15644,7 @@ }, "LicenseModel":{ "shape":"String", - "documentation":"The license model information for the restored DB instance.
License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see RDS for Db2 licensing options in the Amazon RDS User Guide.
This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.
Valid Values:
RDS for Db2 - bring-your-own-license | marketplace-license
RDS for MariaDB - general-public-license
RDS for Microsoft SQL Server - license-included
RDS for MySQL - general-public-license
RDS for Oracle - bring-your-own-license | license-included
RDS for PostgreSQL - postgresql-license
Default: Same as the source.
" + "documentation":"The license model information for the restored DB instance.
License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group and an Amazon Web Services License Manager self-managed license. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see Amazon RDS for Db2 licensing options in the Amazon RDS User Guide.
This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.
Valid Values:
RDS for Db2 - bring-your-own-license | marketplace-license
RDS for MariaDB - general-public-license
RDS for Microsoft SQL Server - license-included
RDS for MySQL - general-public-license
RDS for Oracle - bring-your-own-license | license-included
RDS for PostgreSQL - postgresql-license
Default: Same as the source.
" }, "DBName":{ "shape":"String", diff --git a/botocore/data/ssm/2014-11-06/service-2.json b/botocore/data/ssm/2014-11-06/service-2.json index c0e9d5c26b..2eac00f7a6 100644 --- a/botocore/data/ssm/2014-11-06/service-2.json +++ b/botocore/data/ssm/2014-11-06/service-2.json @@ -92,7 +92,7 @@ {"shape":"InvalidParameters"}, {"shape":"InternalServerError"} ], - "documentation":"Generates an activation code and activation ID you can use to register your on-premises servers, edge devices, or virtual machine (VM) with Amazon Web Services Systems Manager. Registering these machines with Systems Manager makes it possible to manage them using Systems Manager capabilities. You use the activation code and ID when installing SSM Agent on machines in your hybrid environment. For more information about requirements for managing on-premises machines using Systems Manager, see Setting up Amazon Web Services Systems Manager for hybrid and multicloud environments in the Amazon Web Services Systems Manager User Guide.
Amazon Elastic Compute Cloud (Amazon EC2) instances, edge devices, and on-premises servers and VMs that are configured for Systems Manager are all called managed nodes.
Generates an activation code and activation ID you can use to register your on-premises servers, edge devices, or virtual machine (VM) with Amazon Web Services Systems Manager. Registering these machines with Systems Manager makes it possible to manage them using Systems Manager capabilities. You use the activation code and ID when installing SSM Agent on machines in your hybrid environment. For more information about requirements for managing on-premises machines using Systems Manager, see Using Amazon Web Services Systems Manager in hybrid and multicloud environments in the Amazon Web Services Systems Manager User Guide.
Amazon Elastic Compute Cloud (Amazon EC2) instances, edge devices, and on-premises servers and VMs that are configured for Systems Manager are all called managed nodes.
Creates a Amazon Web Services Systems Manager (SSM document). An SSM document defines the actions that Systems Manager performs on your managed nodes. For more information about SSM documents, including information about supported schemas, features, and syntax, see Amazon Web Services Systems Manager Documents in the Amazon Web Services Systems Manager User Guide.
" + "documentation":"Creates a Amazon Web Services Systems Manager (SSM document). An SSM document defines the actions that Systems Manager performs on your managed nodes. For more information about SSM documents, including information about supported schemas, features, and syntax, see Amazon Web Services Systems Manager Documents in the Amazon Web Services Systems Manager User Guide.
" }, "CreateMaintenanceWindow":{ "name":"CreateMaintenanceWindow", @@ -239,7 +239,7 @@ {"shape":"ResourceDataSyncAlreadyExistsException"}, {"shape":"ResourceDataSyncInvalidConfigurationException"} ], - "documentation":"A resource data sync helps you view data from multiple sources in a single location. Amazon Web Services Systems Manager offers two types of resource data sync: SyncToDestination
and SyncFromSource
.
You can configure Systems Manager Inventory to use the SyncToDestination
type to synchronize Inventory data from multiple Amazon Web Services Regions to a single Amazon Simple Storage Service (Amazon S3) bucket. For more information, see Configuring resource data sync for Inventory in the Amazon Web Services Systems Manager User Guide.
You can configure Systems Manager Explorer to use the SyncFromSource
type to synchronize operational work items (OpsItems) and operational data (OpsData) from multiple Amazon Web Services Regions to a single Amazon S3 bucket. This type can synchronize OpsItems and OpsData from multiple Amazon Web Services accounts and Amazon Web Services Regions or EntireOrganization
by using Organizations. For more information, see Setting up Systems Manager Explorer to display data from multiple accounts and Regions in the Amazon Web Services Systems Manager User Guide.
A resource data sync is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data. To check the status of a sync, use the ListResourceDataSync.
By default, data isn't encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy.
A resource data sync helps you view data from multiple sources in a single location. Amazon Web Services Systems Manager offers two types of resource data sync: SyncToDestination
and SyncFromSource
.
You can configure Systems Manager Inventory to use the SyncToDestination
type to synchronize Inventory data from multiple Amazon Web Services Regions to a single Amazon Simple Storage Service (Amazon S3) bucket. For more information, see Creating a resource data sync for Inventory in the Amazon Web Services Systems Manager User Guide.
You can configure Systems Manager Explorer to use the SyncFromSource
type to synchronize operational work items (OpsItems) and operational data (OpsData) from multiple Amazon Web Services Regions to a single Amazon S3 bucket. This type can synchronize OpsItems and OpsData from multiple Amazon Web Services accounts and Amazon Web Services Regions or EntireOrganization
by using Organizations. For more information, see Setting up Systems Manager Explorer to display data from multiple accounts and Regions in the Amazon Web Services Systems Manager User Guide.
A resource data sync is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data. To check the status of a sync, use the ListResourceDataSync.
By default, data isn't encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy.
The value of a key-value pair that identifies the location of an attachment to a document. The format for Value depends on the type of key you specify.
For the key SourceUrl, the value is an S3 bucket location. For example:
\"Values\": [ \"s3://doc-example-bucket/my-folder\" ]
For the key S3FileUrl, the value is a file in an S3 bucket. For example:
\"Values\": [ \"s3://doc-example-bucket/my-folder/my-file.py\" ]
For the key AttachmentReference, the value is constructed from the name of another SSM document in your account, a version number of that document, and a file attached to that document version that you want to reuse. For example:
\"Values\": [ \"MyOtherDocument/3/my-other-file.py\" ]
However, if the SSM document is shared with you from another account, the full SSM document ARN must be specified instead of the document name only. For example:
\"Values\": [ \"arn:aws:ssm:us-east-2:111122223333:document/OtherAccountDocument/3/their-file.py\" ]
The value of a key-value pair that identifies the location of an attachment to a document. The format for Value depends on the type of key you specify.
For the key SourceUrl, the value is an S3 bucket location. For example:
\"Values\": [ \"s3://amzn-s3-demo-bucket/my-prefix\" ]
For the key S3FileUrl, the value is a file in an S3 bucket. For example:
\"Values\": [ \"s3://amzn-s3-demo-bucket/my-prefix/my-file.py\" ]
For the key AttachmentReference, the value is constructed from the name of another SSM document in your account, a version number of that document, and a file attached to that document version that you want to reuse. For example:
\"Values\": [ \"MyOtherDocument/3/my-other-file.py\" ]
However, if the SSM document is shared with you from another account, the full SSM document ARN must be specified instead of the document name only. For example:
\"Values\": [ \"arn:aws:ssm:us-east-2:111122223333:document/OtherAccountDocument/3/their-file.py\" ]
The CloudWatch alarm that was invoked by the automation.
" }, + "TargetLocationsURL":{ + "shape":"TargetLocationsURL", + "documentation":"A publicly accessible URL for a file that contains the TargetLocations
body. Currently, only files in presigned Amazon S3 buckets are supported
The subtype of the Automation operation. Currently, the only supported value is ChangeRequest
.
Use this filter with DescribeAutomationExecutions. Specify either Local or CrossAccount. CrossAccount is an Automation that runs in multiple Amazon Web Services Regions and Amazon Web Services accounts. For more information, see Running Automation workflows in multiple Amazon Web Services Regions and accounts in the Amazon Web Services Systems Manager User Guide.
" + "documentation":"Use this filter with DescribeAutomationExecutions. Specify either Local or CrossAccount. CrossAccount is an Automation that runs in multiple Amazon Web Services Regions and Amazon Web Services accounts. For more information, see Running automations in multiple Amazon Web Services Regions and accounts in the Amazon Web Services Systems Manager User Guide.
" }, "AlarmConfiguration":{ "shape":"AlarmConfiguration", @@ -3583,6 +3587,10 @@ "shape":"AlarmStateInformationList", "documentation":"The CloudWatch alarm that was invoked by the automation.
" }, + "TargetLocationsURL":{ + "shape":"TargetLocationsURL", + "documentation":"A publicly accessible URL for a file that contains the TargetLocations
body. Currently, only files in presigned Amazon S3 buckets are supported
The subtype of the Automation operation. Currently, the only supported value is ChangeRequest
.
A list of explicitly approved patches for the baseline.
For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.
" + "documentation":"A list of explicitly approved patches for the baseline.
For information about accepted formats for lists of approved patches and rejected patches, see Package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.
" }, "ApprovedPatchesComplianceLevel":{ "shape":"PatchComplianceLevel", @@ -3729,7 +3737,7 @@ }, "RejectedPatches":{ "shape":"PatchIdList", - "documentation":"A list of explicitly rejected patches for the baseline.
For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.
" + "documentation":"A list of explicitly rejected patches for the baseline.
For information about accepted formats for lists of approved patches and rejected patches, see Package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.
" }, "RejectedPatchesAction":{ "shape":"PatchAction", @@ -3974,7 +3982,7 @@ }, "value":{ "shape":"CommandFilterValue", - "documentation":"The filter value. Valid values for each filter key are as follows:
InvokedAfter: Specify a timestamp to limit your results. For example, specify 2021-07-07T00:00:00Z
to see a list of command executions occurring July 7, 2021, and later.
InvokedBefore: Specify a timestamp to limit your results. For example, specify 2021-07-07T00:00:00Z
to see a list of command executions from before July 7, 2021.
Status: Specify a valid command status to see a list of all command executions with that status. The status choices depend on the API you call.
The status values you can specify for ListCommands
are:
Pending
InProgress
Success
Cancelled
Failed
TimedOut
(this includes both Delivery and Execution time outs)
AccessDenied
DeliveryTimedOut
ExecutionTimedOut
Incomplete
NoInstancesInTag
LimitExceeded
The status values you can specify for ListCommandInvocations
are:
Pending
InProgress
Delayed
Success
Cancelled
Failed
TimedOut
(this includes both Delivery and Execution time outs)
AccessDenied
DeliveryTimedOut
ExecutionTimedOut
Undeliverable
InvalidPlatform
Terminated
DocumentName: Specify name of the Amazon Web Services Systems Manager document (SSM document) for which you want to see command execution results. For example, specify AWS-RunPatchBaseline
to see command executions that used this SSM document to perform security patching operations on managed nodes.
ExecutionStage: Specify one of the following values (ListCommands
operations only):
Executing
: Returns a list of command executions that are currently still running.
Complete
: Returns a list of command executions that have already completed.
The filter value. Valid values for each filter key are as follows:
InvokedAfter: Specify a timestamp to limit your results. For example, specify 2024-07-07T00:00:00Z
to see a list of command executions occurring July 7, 2024, and later.
InvokedBefore: Specify a timestamp to limit your results. For example, specify 2024-07-07T00:00:00Z
to see a list of command executions from before July 7, 2024.
Status: Specify a valid command status to see a list of all command executions with that status. The status choices depend on the API you call.
The status values you can specify for ListCommands
are:
Pending
InProgress
Success
Cancelled
Failed
TimedOut
(this includes both Delivery and Execution time outs)
AccessDenied
DeliveryTimedOut
ExecutionTimedOut
Incomplete
NoInstancesInTag
LimitExceeded
The status values you can specify for ListCommandInvocations
are:
Pending
InProgress
Delayed
Success
Cancelled
Failed
TimedOut
(this includes both Delivery and Execution time outs)
AccessDenied
DeliveryTimedOut
ExecutionTimedOut
Undeliverable
InvalidPlatform
Terminated
DocumentName: Specify name of the Amazon Web Services Systems Manager document (SSM document) for which you want to see command execution results. For example, specify AWS-RunPatchBaseline
to see command executions that used this SSM document to perform security patching operations on managed nodes.
ExecutionStage: Specify one of the following values (ListCommands
operations only):
Executing
: Returns a list of command executions that are currently still running.
Complete
: Returns a list of command executions that have already completed.
Describes a command filter.
A managed node ID can't be specified when a command status is Pending
because the command hasn't run on the node yet.
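To make the filter semantics above concrete, here is a non-authoritative boto3 sketch of ListCommands with two filters; the timestamp and status values are illustrative only:

    import boto3

    ssm = boto3.client("ssm")

    # Sketch: commands invoked after the example timestamp that timed out.
    # Note the lowercase "key"/"value" member names used by command filters.
    response = ssm.list_commands(
        Filters=[
            {"key": "InvokedAfter", "value": "2024-07-07T00:00:00Z"},
            {"key": "Status", "value": "TimedOut"},
        ]
    )
    for command in response["Commands"]:
        print(command["CommandId"], command["Status"])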
The S3 bucket where the responses to the command executions should be stored. This was requested when issuing the command. For example, in the following response:
doc-example-bucket/ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix/i-02573cafcfEXAMPLE/awsrunShellScript
doc-example-bucket
is the name of the S3 bucket;
ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix
is the name of the S3 prefix;
i-02573cafcfEXAMPLE
is the managed node ID;
awsrunShellScript
is the name of the plugin.
The S3 bucket where the responses to the command executions should be stored. This was requested when issuing the command. For example, in the following response:
amzn-s3-demo-bucket/my-prefix/i-02573cafcfEXAMPLE/awsrunShellScript
amzn-s3-demo-bucket
is the name of the S3 bucket;
my-prefix
is the name of the S3 prefix;
i-02573cafcfEXAMPLE
is the managed node ID;
awsrunShellScript
is the name of the plugin.
The S3 directory path inside the bucket where the responses to the command executions should be stored. This was requested when issuing the command. For example, in the following response:
doc-example-bucket/ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix/i-02573cafcfEXAMPLE/awsrunShellScript
doc-example-bucket
is the name of the S3 bucket;
ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix
is the name of the S3 prefix;
i-02573cafcfEXAMPLE
is the managed node ID;
awsrunShellScript
is the name of the plugin.
The S3 directory path inside the bucket where the responses to the command executions should be stored. This was requested when issuing the command. For example, in the following response:
amzn-s3-demo-bucket/my-prefix/i-02573cafcfEXAMPLE/awsrunShellScript
amzn-s3-demo-bucket
is the name of the S3 bucket;
my-prefix
is the name of the S3 prefix;
i-02573cafcfEXAMPLE
is the managed node ID;
awsrunShellScript
is the name of the plugin.
Describes plugin details.
" @@ -4485,7 +4493,7 @@ }, "IamRole":{ "shape":"IamRole", - "documentation":"The name of the Identity and Access Management (IAM) role that you want to assign to the managed node. This IAM role must provide AssumeRole permissions for the Amazon Web Services Systems Manager service principal ssm.amazonaws.com
. For more information, see Create an IAM service role for a hybrid and multicloud environment in the Amazon Web Services Systems Manager User Guide.
You can't specify an IAM service-linked role for this parameter. You must create a unique role.
The name of the Identity and Access Management (IAM) role that you want to assign to the managed node. This IAM role must provide AssumeRole permissions for the Amazon Web Services Systems Manager service principal ssm.amazonaws.com
. For more information, see Create the IAM service role required for Systems Manager in hybrid and multicloud environments in the Amazon Web Services Systems Manager User Guide.
You can't specify an IAM service-linked role for this parameter. You must create a unique role.
The date by which this activation request should expire, in timestamp format, such as \"2021-07-07T00:00:00\". You can specify a date up to 30 days in advance. If you don't provide an expiration date, the activation code expires in 24 hours.
" + "documentation":"The date by which this activation request should expire, in timestamp format, such as \"2024-07-07T00:00:00\". You can specify a date up to 30 days in advance. If you don't provide an expiration date, the activation code expires in 24 hours.
" }, "Tags":{ "shape":"TagList", @@ -4656,7 +4664,7 @@ }, "Targets":{ "shape":"Targets", - "documentation":"The targets for the association. You can target managed nodes by using tags, Amazon Web Services resource groups, all managed nodes in an Amazon Web Services account, or individual managed node IDs. You can target all managed nodes in an Amazon Web Services account by specifying the InstanceIds
key with a value of *
. For more information about choosing targets for an association, see About targets and rate controls in State Manager associations in the Amazon Web Services Systems Manager User Guide.
The targets for the association. You can target managed nodes by using tags, Amazon Web Services resource groups, all managed nodes in an Amazon Web Services account, or individual managed node IDs. You can target all managed nodes in an Amazon Web Services account by specifying the InstanceIds
key with a value of *
. For more information about choosing targets for an association, see Understanding targets and rate controls in State Manager associations in the Amazon Web Services Systems Manager User Guide.
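A minimal sketch of the "target all managed nodes" pattern described above, using an InstanceIds key with a value of *; the document name and schedule are illustrative:

    import boto3

    ssm = boto3.client("ssm")

    # Sketch: associate an inventory-gathering document with every managed node.
    response = ssm.create_association(
        Name="AWS-GatherSoftwareInventory",
        Targets=[{"Key": "InstanceIds", "Values": ["*"]}],
        ScheduleExpression="rate(30 minutes)",
    )
    print(response["AssociationDescription"]["AssociationId"])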
A list of explicitly approved patches for the baseline.
For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.
" + "documentation":"A list of explicitly approved patches for the baseline.
For information about accepted formats for lists of approved patches and rejected patches, see Package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.
" }, "ApprovedPatchesComplianceLevel":{ "shape":"PatchComplianceLevel", @@ -5012,7 +5020,7 @@ }, "RejectedPatches":{ "shape":"PatchIdList", - "documentation":"A list of explicitly rejected patches for the baseline.
For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.
" + "documentation":"A list of explicitly rejected patches for the baseline.
For information about accepted formats for lists of approved patches and rejected patches, see Package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.
" }, "RejectedPatchesAction":{ "shape":"PatchAction", @@ -5190,7 +5198,7 @@ }, "DeletionSummary":{ "shape":"InventoryDeletionSummary", - "documentation":"A summary of the delete operation. For more information about this summary, see Understanding the delete inventory summary in the Amazon Web Services Systems Manager User Guide.
" + "documentation":"A summary of the delete operation. For more information about this summary, see Deleting custom inventory in the Amazon Web Services Systems Manager User Guide.
" } } }, @@ -6005,7 +6013,7 @@ }, "Filters":{ "shape":"PatchOrchestratorFilterList", - "documentation":"Each element in the array is a structure containing a key-value pair.
Supported keys for DescribeInstancePatches
include the following:
Classification
Sample values: Security
| SecurityUpdates
KBId
Sample values: KB4480056
| java-1.7.0-openjdk.x86_64
Severity
Sample values: Important
| Medium
| Low
State
Sample values: Installed
| InstalledOther
| InstalledPendingReboot
For lists of all State
values, see Understanding patch compliance state values in the Amazon Web Services Systems Manager User Guide.
Each element in the array is a structure containing a key-value pair.
Supported keys for DescribeInstancePatches
include the following:
Classification
Sample values: Security
| SecurityUpdates
KBId
Sample values: KB4480056
| java-1.7.0-openjdk.x86_64
Severity
Sample values: Important
| Medium
| Low
State
Sample values: Installed
| InstalledOther
| InstalledPendingReboot
For lists of all State
values, see Patch compliance state values in the Amazon Web Services Systems Manager User Guide.
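A hedged example of the DescribeInstancePatches filters listed above; the node ID reuses the placeholder from the S3 path examples earlier in this diff:

    import boto3

    ssm = boto3.client("ssm")

    # Sketch: report patches on one managed node that are installed but
    # still waiting on a reboot, using the State filter key shown above.
    response = ssm.describe_instance_patches(
        InstanceId="i-02573cafcfEXAMPLE",
        Filters=[{"Key": "State", "Values": ["InstalledPendingReboot"]}],
    )
    for patch in response["Patches"]:
        print(patch["Title"], patch["State"])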
Each entry in the array is a structure containing:
Key. A string between 1 and 128 characters. Supported keys include ExecutedBefore
and ExecutedAfter
.
Values. An array of strings, each between 1 and 256 characters. Supported values are date/time strings in a valid ISO 8601 date/time format, such as 2021-11-04T05:00:00Z
.
Each entry in the array is a structure containing:
Key. A string between 1 and 128 characters. Supported keys include ExecutedBefore
and ExecutedAfter
.
Values. An array of strings, each between 1 and 256 characters. Supported values are date/time strings in a valid ISO 8601 date/time format, such as 2024-11-04T05:00:00Z
.
The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow
.
However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up maintenance windows in the in the Amazon Web Services Systems Manager User Guide.
" + "documentation":"The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow
.
However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up Maintenance Windows in the Amazon Web Services Systems Manager User Guide.
" }, "TaskType":{ "shape":"MaintenanceWindowTaskType", @@ -8373,7 +8393,7 @@ "members":{ "Name":{ "shape":"PSParameterName", - "documentation":"The name or Amazon Resource Name (ARN) of the parameter that you want to query. For parameters shared with you from another account, you must use the full ARN.
To query by parameter label, use \"Name\": \"name:label\"
. To query by parameter version, use \"Name\": \"name:version\"
.
For more information about shared parameters, see Working with shared parameters in the Amazon Web Services Systems Manager User Guide.
" + "documentation":"The name or Amazon Resource Name (ARN) of the parameter that you want to query. For parameters shared with you from another account, you must use the full ARN.
To query by parameter label, use \"Name\": \"name:label\"
. To query by parameter version, use \"Name\": \"name:version\"
.
For more information about shared parameters, see Working with shared parameters in the Amazon Web Services Systems Manager User Guide.
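For example (a non-authoritative sketch; the parameter name is hypothetical), the name:label and name:version selectors described above map directly onto GetParameter calls:

    import boto3

    ssm = boto3.client("ssm")

    # Sketch: query a labeled value and a specific version of one parameter.
    by_label = ssm.get_parameter(Name="/app/db-password:prod", WithDecryption=True)
    by_version = ssm.get_parameter(Name="/app/db-password:3", WithDecryption=True)
    print(by_label["Parameter"]["Version"], by_version["Parameter"]["Version"])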
" }, "WithDecryption":{ "shape":"Boolean", @@ -8907,7 +8927,7 @@ }, "Name":{ "shape":"String", - "documentation":"The name assigned to an on-premises server, edge device, or virtual machine (VM) when it is activated as a Systems Manager managed node. The name is specified as the DefaultInstanceName
property using the CreateActivation command. It is applied to the managed node by specifying the Activation Code and Activation ID when you install SSM Agent on the node, as explained in Install SSM Agent for a hybrid and multicloud environment (Linux) and Install SSM Agent for a hybrid and multicloud environment (Windows). To retrieve the Name
tag of an EC2 instance, use the Amazon EC2 DescribeInstances
operation. For information, see DescribeInstances in the Amazon EC2 API Reference or describe-instances in the Amazon Web Services CLI Command Reference.
The name assigned to an on-premises server, edge device, or virtual machine (VM) when it is activated as a Systems Manager managed node. The name is specified as the DefaultInstanceName
property using the CreateActivation command. It is applied to the managed node by specifying the Activation Code and Activation ID when you install SSM Agent on the node, as explained in How to install SSM Agent on hybrid Linux nodes and How to install SSM Agent on hybrid Windows Server nodes. To retrieve the Name
tag of an EC2 instance, use the Amazon EC2 DescribeInstances
operation. For information, see DescribeInstances in the Amazon EC2 API Reference or describe-instances in the Amazon Web Services CLI Command Reference.
An https URL or an Amazon Simple Storage Service (Amazon S3) path-style URL to a list of patches to be installed. This patch installation list, which you maintain in an S3 bucket in YAML format and specify in the SSM document AWS-RunPatchBaseline
, overrides the patches specified by the default patch baseline.
For more information about the InstallOverrideList
parameter, see About the AWS-RunPatchBaseline SSM document
in the Amazon Web Services Systems Manager User Guide.
An https URL or an Amazon Simple Storage Service (Amazon S3) path-style URL to a list of patches to be installed. This patch installation list, which you maintain in an S3 bucket in YAML format and specify in the SSM document AWS-RunPatchBaseline
, overrides the patches specified by the default patch baseline.
For more information about the InstallOverrideList
parameter, see SSM Command document for patching: AWS-RunPatchBaseline
in the Amazon Web Services Systems Manager User Guide.
Information about the delete operation. For more information about this summary, see Understanding the delete inventory summary in the Amazon Web Services Systems Manager User Guide.
" + "documentation":"Information about the delete operation. For more information about this summary, see Understanding the delete inventory summary in the Amazon Web Services Systems Manager User Guide.
" }, "LastStatusUpdateTime":{ "shape":"InventoryDeletionLastStatusUpdateTime", @@ -9972,7 +9992,7 @@ }, "Type":{ "shape":"InventoryQueryOperatorType", - "documentation":"The type of filter.
The Exists
filter must be used with aggregators. For more information, see Aggregating inventory data in the Amazon Web Services Systems Manager User Guide.
The type of filter.
The Exists
filter must be used with aggregators. For more information, see Aggregating inventory data in the Amazon Web Services Systems Manager User Guide.
One or more filters. Use a filter to return a more specific list of results.
" @@ -11355,7 +11375,7 @@ }, "ServiceRoleArn":{ "shape":"ServiceRole", - "documentation":"The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow
.
However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up maintenance windows in the in the Amazon Web Services Systems Manager User Guide.
" + "documentation":"The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow
.
However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up Maintenance Windows in the in the Amazon Web Services Systems Manager User Guide.
" }, "TimeoutSeconds":{ "shape":"TimeoutSeconds", @@ -11480,7 +11500,7 @@ }, "ServiceRoleArn":{ "shape":"ServiceRole", - "documentation":"The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow
.
However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up maintenance windows in the in the Amazon Web Services Systems Manager User Guide.
" + "documentation":"The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow
.
However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up Maintenance Windows in the in the Amazon Web Services Systems Manager User Guide.
" }, "MaxConcurrency":{ "shape":"MaxConcurrency", @@ -12035,7 +12055,7 @@ }, "Status":{ "shape":"OpsItemStatus", - "documentation":"The OpsItem status. Status can be Open
, In Progress
, or Resolved
. For more information, see Editing OpsItem details in the Amazon Web Services Systems Manager User Guide.
The OpsItem status. For more information, see Editing OpsItem details in the Amazon Web Services Systems Manager User Guide.
" }, "OpsItemId":{ "shape":"OpsItemId", @@ -12587,7 +12607,7 @@ }, "Status":{ "shape":"OpsItemStatus", - "documentation":"The OpsItem status. Status can be Open
, In Progress
, or Resolved
.
The OpsItem status.
" }, "OpsItemId":{ "shape":"OpsItemId", @@ -13442,7 +13462,7 @@ }, "State":{ "shape":"PatchComplianceDataState", - "documentation":"The state of the patch on the managed node, such as INSTALLED or FAILED.
For descriptions of each patch state, see About patch compliance in the Amazon Web Services Systems Manager User Guide.
" + "documentation":"The state of the patch on the managed node, such as INSTALLED or FAILED.
For descriptions of each patch state, see About patch compliance in the Amazon Web Services Systems Manager User Guide.
" }, "InstalledTime":{ "shape":"DateTime", @@ -13705,12 +13725,12 @@ }, "ApproveAfterDays":{ "shape":"ApproveAfterDays", - "documentation":"The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of 7
means that patches are approved seven days after they are released.
This parameter is marked as not required, but your request must include a value for either ApproveAfterDays
or ApproveUntilDate
.
Not supported for Debian Server or Ubuntu Server.
", + "documentation":"The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of 7
means that patches are approved seven days after they are released.
This parameter is marked as Required: No
, but your request must include a value for either ApproveAfterDays
or ApproveUntilDate
.
Not supported for Debian Server or Ubuntu Server.
Use caution when setting this value for Windows Server patch baselines. Because patch updates that are replaced by later updates are removed, setting too broad a value for this parameter can result in crucial patches not being installed. For more information, see the Windows Server tab in the topic How security patches are selected in the Amazon Web Services Systems Manager User Guide.
The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically.
Enter dates in the format YYYY-MM-DD
. For example, 2021-12-31
.
This parameter is marked as not required, but your request must include a value for either ApproveUntilDate
or ApproveAfterDays
.
Not supported for Debian Server or Ubuntu Server.
", + "documentation":"The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically.
Enter dates in the format YYYY-MM-DD
. For example, 2024-12-31
.
This parameter is marked as Required: No
, but your request must include a value for either ApproveUntilDate
or ApproveAfterDays
.
Not supported for Debian Server or Ubuntu Server.
Use caution when setting this value for Windows Server patch baselines. Because patch updates that are replaced by later updates are removed, setting too broad a value for this parameter can result in crucial patches not being installed. For more information, see the Windows Server tab in the topic How security patches are selected in the Amazon Web Services Systems Manager User Guide.
The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow
.
However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up maintenance windows in the in the Amazon Web Services Systems Manager User Guide.
" + "documentation":"The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow
.
However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up Maintenance Windows in the in the Amazon Web Services Systems Manager User Guide.
" }, "TaskType":{ "shape":"MaintenanceWindowTaskType", @@ -15277,7 +15297,7 @@ }, "value":{ "shape":"SessionFilterValue", - "documentation":"The filter value. Valid values for each filter key are as follows:
InvokedAfter: Specify a timestamp to limit your results. For example, specify 2018-08-29T00:00:00Z to see sessions that started August 29, 2018, and later.
InvokedBefore: Specify a timestamp to limit your results. For example, specify 2018-08-29T00:00:00Z to see sessions that started before August 29, 2018.
Target: Specify a managed node to which session connections have been made.
Owner: Specify an Amazon Web Services user to see a list of sessions started by that user.
Status: Specify a valid session status to see a list of all sessions with that status. Status values you can specify include:
Connected
Connecting
Disconnected
Terminated
Terminating
Failed
SessionId: Specify a session ID to return details about the session.
The filter value. Valid values for each filter key are as follows:
InvokedAfter: Specify a timestamp to limit your results. For example, specify 2024-08-29T00:00:00Z to see sessions that started August 29, 2024, and later.
InvokedBefore: Specify a timestamp to limit your results. For example, specify 2024-08-29T00:00:00Z to see sessions that started before August 29, 2024.
Target: Specify a managed node to which session connections have been made.
Owner: Specify an Amazon Web Services user to see a list of sessions started by that user.
Status: Specify a valid session status to see a list of all sessions with that status. Status values you can specify include:
Connected
Connecting
Disconnected
Terminated
Terminating
Failed
SessionId: Specify a session ID to return details about the session.
Describes a filter for Session Manager information.
" @@ -15516,7 +15536,7 @@ }, "Targets":{ "shape":"Targets", - "documentation":"A key-value mapping to target resources. Required if you specify TargetParameterName.
" + "documentation":"A key-value mapping to target resources. Required if you specify TargetParameterName.
If both this parameter and the TargetLocation:Targets
parameter are supplied, TargetLocation:Targets
takes precedence.
The maximum number of targets allowed to run this task in parallel. You can specify a number, such as 10, or a percentage, such as 10%. The default value is 10
.
The maximum number of targets allowed to run this task in parallel. You can specify a number, such as 10, or a percentage, such as 10%. The default value is 10
.
If both this parameter and the TargetLocation:TargetsMaxConcurrency
are supplied, TargetLocation:TargetsMaxConcurrency
takes precedence.
The number of errors that are allowed before the system stops running the automation on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops running the automation when the fourth error is received. If you specify 0, then the system stops running the automation on additional targets after the first error result is returned. If you run an automation on 50 resources and set max-errors to 10%, then the system stops running the automation on additional targets when the sixth error is received.
Executions that are already running an automation when max-errors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set max-concurrency to 1 so the executions proceed one at a time.
" + "documentation":"The number of errors that are allowed before the system stops running the automation on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops running the automation when the fourth error is received. If you specify 0, then the system stops running the automation on additional targets after the first error result is returned. If you run an automation on 50 resources and set max-errors to 10%, then the system stops running the automation on additional targets when the sixth error is received.
Executions that are already running an automation when max-errors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set max-concurrency to 1 so the executions proceed one at a time.
If this parameter and the TargetLocation:TargetsMaxErrors
parameter are both supplied, TargetLocation:TargetsMaxErrors
takes precedence.
A location is a combination of Amazon Web Services Regions and/or Amazon Web Services accounts where you want to run the automation. Use this operation to start an automation in multiple Amazon Web Services Regions and multiple Amazon Web Services accounts. For more information, see Running Automation workflows in multiple Amazon Web Services Regions and Amazon Web Services accounts in the Amazon Web Services Systems Manager User Guide.
", + "documentation":"A location is a combination of Amazon Web Services Regions and/or Amazon Web Services accounts where you want to run the automation. Use this operation to start an automation in multiple Amazon Web Services Regions and multiple Amazon Web Services accounts. For more information, see Running automations in multiple Amazon Web Services Regions and accounts in the Amazon Web Services Systems Manager User Guide.
", "box":true }, "Tags":{ @@ -15542,6 +15562,10 @@ "AlarmConfiguration":{ "shape":"AlarmConfiguration", "documentation":"The CloudWatch alarm you want to apply to your automation.
" + }, + "TargetLocationsURL":{ + "shape":"TargetLocationsURL", + "documentation":"Specify a publicly accessible URL for a file that contains the TargetLocations
body. Currently, only files in presigned Amazon S3 buckets are supported.
Indicates whether to include child organizational units (OUs) that are children of the targeted OUs. The default is false
.
Amazon Web Services accounts or organizational units to exclude as expanded targets.
" + }, + "Targets":{ + "shape":"Targets", + "documentation":"A list of key-value mappings to target resources. If you specify values for this data type, you must also specify a value for TargetParameterName
.
This Targets
parameter takes precedence over the StartAutomationExecution:Targets
parameter if both are supplied.
The maximum number of targets allowed to run this task in parallel. This TargetsMaxConcurrency
takes precedence over the StartAutomationExecution:MaxConcurrency
parameter if both are supplied.
The maximum number of errors that are allowed before the system stops running the automation on additional targets. This TargetsMaxErrors
parameter takes precedence over the StartAutomationExecution:MaxErrors
parameter if both are supplied.
The combination of Amazon Web Services Regions and Amazon Web Services accounts targeted by the current Automation execution.
" @@ -15987,6 +16031,10 @@ "max":100, "min":1 }, + "TargetLocationsURL":{ + "type":"string", + "pattern":"^https:\\/\\/[-a-zA-Z0-9@:%._\\+~#=]{1,253}\\.s3(\\.[a-z\\d-]{9,16})?\\.amazonaws\\.com\\/.{1,2000}" + }, "TargetMap":{ "type":"map", "key":{"shape":"TargetMapKey"}, @@ -16021,7 +16069,7 @@ "members":{ "Message":{"shape":"String"} }, - "documentation":"The specified target managed node for the session isn't fully configured for use with Session Manager. For more information, see Getting started with Session Manager in the Amazon Web Services Systems Manager User Guide. This error is also returned if you attempt to start a session on a managed node that is located in a different account or Region
", + "documentation":"The specified target managed node for the session isn't fully configured for use with Session Manager. For more information, see Setting up Session Manager in the Amazon Web Services Systems Manager User Guide. This error is also returned if you attempt to start a session on a managed node that is located in a different account or Region
", "exception":true }, "TargetParameterList":{ @@ -16638,7 +16686,7 @@ }, "ServiceRoleArn":{ "shape":"ServiceRole", - "documentation":"The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow
.
However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up maintenance windows in the in the Amazon Web Services Systems Manager User Guide.
" + "documentation":"The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow
.
However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up Maintenance Windows in the in the Amazon Web Services Systems Manager User Guide.
" }, "TaskParameters":{ "shape":"MaintenanceWindowTaskParameters", @@ -16710,7 +16758,7 @@ }, "ServiceRoleArn":{ "shape":"ServiceRole", - "documentation":"The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow
.
However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up maintenance windows in the in the Amazon Web Services Systems Manager User Guide.
" + "documentation":"The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow
.
However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up Maintenance Windows in the in the Amazon Web Services Systems Manager User Guide.
" }, "TaskParameters":{ "shape":"MaintenanceWindowTaskParameters", @@ -16768,7 +16816,7 @@ }, "IamRole":{ "shape":"IamRole", - "documentation":"The name of the Identity and Access Management (IAM) role that you want to assign to the managed node. This IAM role must provide AssumeRole permissions for the Amazon Web Services Systems Manager service principal ssm.amazonaws.com
. For more information, see Create an IAM service role for a hybrid and multicloud environment in the Amazon Web Services Systems Manager User Guide.
You can't specify an IAM service-linked role for this parameter. You must create a unique role.
The name of the Identity and Access Management (IAM) role that you want to assign to the managed node. This IAM role must provide AssumeRole permissions for the Amazon Web Services Systems Manager service principal ssm.amazonaws.com
. For more information, see Create the IAM service role required for Systems Manager in hybrid and multicloud environments in the Amazon Web Services Systems Manager User Guide.
You can't specify an IAM service-linked role for this parameter. You must create a unique role.
The OpsItem status. Status can be Open
, In Progress
, or Resolved
. For more information, see Editing OpsItem details in the Amazon Web Services Systems Manager User Guide.
The OpsItem status. For more information, see Editing OpsItem details in the Amazon Web Services Systems Manager User Guide.
" }, "OpsItemId":{ "shape":"OpsItemId", @@ -16901,7 +16949,7 @@ }, "ApprovedPatches":{ "shape":"PatchIdList", - "documentation":"A list of explicitly approved patches for the baseline.
For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.
" + "documentation":"A list of explicitly approved patches for the baseline.
For information about accepted formats for lists of approved patches and rejected patches, see Package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.
" }, "ApprovedPatchesComplianceLevel":{ "shape":"PatchComplianceLevel", @@ -16914,7 +16962,7 @@ }, "RejectedPatches":{ "shape":"PatchIdList", - "documentation":"A list of explicitly rejected patches for the baseline.
For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.
" + "documentation":"A list of explicitly rejected patches for the baseline.
For information about accepted formats for lists of approved patches and rejected patches, see Package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.
" }, "RejectedPatchesAction":{ "shape":"PatchAction", diff --git a/docs/source/conf.py b/docs/source/conf.py index 245870f5e8..91bd2a6292 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -59,7 +59,7 @@ # The short X.Y version. version = '1.35.' # The full version, including alpha/beta/rc tags. -release = '1.35.20' +release = '1.35.21' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages.