From bd40e1ef7e738d7a5495b356fe9d6f444128e2c7 Mon Sep 17 00:00:00 2001 From: Alex Woods Date: Mon, 19 Feb 2024 13:06:13 -0800 Subject: [PATCH 1/8] Remove dependencies on bigdecimal and base64 (#2987) --- gems/aws-sdk-core/CHANGELOG.md | 4 +++- gems/aws-sdk-core/aws-sdk-core.gemspec | 1 - services.json | 10 ++-------- 3 files changed, 5 insertions(+), 10 deletions(-) diff --git a/gems/aws-sdk-core/CHANGELOG.md b/gems/aws-sdk-core/CHANGELOG.md index c12404a9113..d2c7beeafa6 100644 --- a/gems/aws-sdk-core/CHANGELOG.md +++ b/gems/aws-sdk-core/CHANGELOG.md @@ -1,6 +1,8 @@ Unreleased Changes ------------------ +* Issue - Remove base64 as dependency. + 3.191.2 (2024-02-14) ------------------ @@ -20,7 +22,7 @@ Unreleased Changes * Feature - Updated Aws::SSO::Client with the latest API changes. -* Feature - Add RBS signature files to support static type checking. +* Feature - Add RBS signature files to support static type checking 3.190.3 (2024-01-16) ------------------ diff --git a/gems/aws-sdk-core/aws-sdk-core.gemspec b/gems/aws-sdk-core/aws-sdk-core.gemspec index 64b99af8a1f..12f78a41c66 100644 --- a/gems/aws-sdk-core/aws-sdk-core.gemspec +++ b/gems/aws-sdk-core/aws-sdk-core.gemspec @@ -16,7 +16,6 @@ Gem::Specification.new do |spec| spec.add_dependency('aws-partitions', '~> 1', '>= 1.651.0') # necessary for new endpoint resolution spec.add_dependency('aws-sigv4', '~> 1.8') # necessary for s3 express auth spec.add_dependency('aws-eventstream', '~> 1', '>= 1.3.0') # necessary for binary eventstream - spec.add_dependency('base64') # necessary for base64 encoding/decoding spec.metadata = { 'source_code_uri' => 'https://github.com/aws/aws-sdk-ruby/tree/version-3/gems/aws-sdk-core', diff --git a/services.json b/services.json index 0301386c90b..06371d83ece 100644 --- a/services.json +++ b/services.json @@ -371,19 +371,13 @@ "Aws::DynamoDB::Plugins::ExtendedRetries", "Aws::DynamoDB::Plugins::SimpleAttributes", "Aws::DynamoDB::Plugins::CRC32Validation" - ], - "dependencies": { - 
"bigdecimal": null - } + ] }, "DynamoDBStreams": { "models": "streams.dynamodb/2012-08-10", "addPlugins": [ "Aws::DynamoDBStreams::Plugins::SimpleAttributes" - ], - "dependencies": { - "bigdecimal": null - } + ] }, "EBS": { "models": "ebs/2019-11-02" From 1ad7f5f4378e90024b8b829a794c33681e666036 Mon Sep 17 00:00:00 2001 From: AWS SDK For Ruby Date: Tue, 20 Feb 2024 19:09:49 +0000 Subject: [PATCH 2/8] Updated API models and rebuilt service gems. --- apis/dynamodb/2012-08-10/docs-2.json | 2 +- apis/firehose/2015-08-04/api-2.json | 190 ------------------ apis/firehose/2015-08-04/docs-2.json | 159 +-------------- apis/lambda/2015-03-31/api-2.json | 1 + apis/lambda/2015-03-31/docs-2.json | 4 +- gems/aws-sdk-core/CHANGELOG.md | 3 + gems/aws-sdk-core/VERSION | 2 +- gems/aws-sdk-core/lib/aws-sdk-sso.rb | 2 +- gems/aws-sdk-core/lib/aws-sdk-sso/client.rb | 2 +- gems/aws-sdk-core/lib/aws-sdk-ssooidc.rb | 2 +- .../lib/aws-sdk-ssooidc/client.rb | 2 +- gems/aws-sdk-core/lib/aws-sdk-sts.rb | 2 +- gems/aws-sdk-core/lib/aws-sdk-sts/client.rb | 2 +- gems/aws-sdk-dynamodb/CHANGELOG.md | 5 + gems/aws-sdk-dynamodb/VERSION | 2 +- .../aws-sdk-dynamodb/aws-sdk-dynamodb.gemspec | 1 - gems/aws-sdk-dynamodb/features/env.rb | 1 - gems/aws-sdk-dynamodb/lib/aws-sdk-dynamodb.rb | 3 +- .../lib/aws-sdk-dynamodb/client.rb | 12 +- gems/aws-sdk-dynamodb/spec/spec_helper.rb | 1 - gems/aws-sdk-dynamodbstreams/CHANGELOG.md | 5 + gems/aws-sdk-dynamodbstreams/VERSION | 2 +- .../aws-sdk-dynamodbstreams.gemspec | 1 - gems/aws-sdk-dynamodbstreams/features/env.rb | 1 - .../lib/aws-sdk-dynamodbstreams.rb | 3 +- .../lib/aws-sdk-dynamodbstreams/client.rb | 2 +- .../spec/spec_helper.rb | 1 - gems/aws-sdk-firehose/CHANGELOG.md | 5 + gems/aws-sdk-firehose/VERSION | 2 +- gems/aws-sdk-firehose/lib/aws-sdk-firehose.rb | 2 +- .../lib/aws-sdk-firehose/client.rb | 65 +----- .../lib/aws-sdk-firehose/client_api.rb | 105 ---------- .../lib/aws-sdk-firehose/endpoints.rb | 28 --- .../lib/aws-sdk-firehose/errors.rb | 127 
------------ .../lib/aws-sdk-firehose/plugins/endpoints.rb | 4 - .../lib/aws-sdk-firehose/types.rb | 183 ----------------- gems/aws-sdk-firehose/sig/client.rbs | 28 --- gems/aws-sdk-firehose/sig/errors.rbs | 24 --- gems/aws-sdk-firehose/sig/types.rbs | 75 ------- gems/aws-sdk-lambda/CHANGELOG.md | 5 + gems/aws-sdk-lambda/VERSION | 2 +- gems/aws-sdk-lambda/lib/aws-sdk-lambda.rb | 2 +- .../lib/aws-sdk-lambda/client.rb | 38 ++-- .../lib/aws-sdk-lambda/types.rb | 9 +- gems/aws-sdk-lambda/sig/client.rbs | 26 +-- gems/aws-sdk-lambda/sig/types.rbs | 18 +- 46 files changed, 99 insertions(+), 1062 deletions(-) diff --git a/apis/dynamodb/2012-08-10/docs-2.json b/apis/dynamodb/2012-08-10/docs-2.json index 7515e9877c3..d9872809e81 100644 --- a/apis/dynamodb/2012-08-10/docs-2.json +++ b/apis/dynamodb/2012-08-10/docs-2.json @@ -53,7 +53,7 @@ "UpdateGlobalTableSettings": "

Updates settings for a global table.

This operation only applies to Version 2017.11.29 (Legacy) of global tables. We recommend using Version 2019.11.21 (Current) when creating new global tables, as it provides greater flexibility, higher efficiency and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you are using, see Determining the version. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables.

", "UpdateItem": "

Edits an existing item's attributes, or adds a new item to the table if it does not already exist. You can put, delete, or add attribute values. You can also perform a conditional update on an existing item (insert a new attribute name-value pair if it doesn't exist, or replace an existing name-value pair if it has certain expected attribute values).

You can also return the item's attribute values in the same UpdateItem operation using the ReturnValues parameter.

", "UpdateKinesisStreamingDestination": "

The command to update the Kinesis stream destination.

", - "UpdateTable": "

Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.

This operation only applies to Version 2019.11.21 (Current) of global tables.

You can only perform one of the following operations at once:

UpdateTable is an asynchronous operation; while it is executing, the table status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot issue another UpdateTable request. When the table returns to the ACTIVE state, the UpdateTable operation is complete.

", + "UpdateTable": "

Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.

This operation only applies to Version 2019.11.21 (Current) of global tables.

You can only perform one of the following operations at once:

UpdateTable is an asynchronous operation; while it's executing, the table status changes from ACTIVE to UPDATING. While it's UPDATING, you can't issue another UpdateTable request on the base table nor any replicas. When the table returns to the ACTIVE state, the UpdateTable operation is complete.

", "UpdateTableReplicaAutoScaling": "

Updates auto scaling settings on your global tables at once.

This operation only applies to Version 2019.11.21 (Current) of global tables.

", "UpdateTimeToLive": "

The UpdateTimeToLive method enables or disables Time to Live (TTL) for the specified table. A successful UpdateTimeToLive call returns the current TimeToLiveSpecification. It can take up to one hour for the change to fully process. Any additional UpdateTimeToLive calls for the same table during this one hour duration result in a ValidationException.

TTL compares the current time in epoch time format to the time stored in the TTL attribute of an item. If the epoch time value stored in the attribute is less than the current time, the item is marked as expired and subsequently deleted.

The epoch time format is the number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC.

DynamoDB deletes expired items on a best-effort basis to ensure availability of throughput for other data operations.

DynamoDB typically deletes expired items within two days of expiration. The exact duration within which an item gets deleted after expiration is specific to the nature of the workload. Items that have expired and not been deleted will still show up in reads, queries, and scans.

As items are deleted, they are removed from any local secondary index and global secondary index immediately in the same eventually consistent way as a standard delete operation.

For more information, see Time To Live in the Amazon DynamoDB Developer Guide.

" }, diff --git a/apis/firehose/2015-08-04/api-2.json b/apis/firehose/2015-08-04/api-2.json index fc1b7b613b7..f27292c47e7 100644 --- a/apis/firehose/2015-08-04/api-2.json +++ b/apis/firehose/2015-08-04/api-2.json @@ -53,22 +53,6 @@ {"shape":"ResourceNotFoundException"} ] }, - "GetKinesisStream":{ - "name":"GetKinesisStream", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"GetKinesisStreamInput"}, - "output":{"shape":"GetKinesisStreamOutput"}, - "errors":[ - {"shape":"ResourceNotFoundException"}, - {"shape":"InvalidArgumentException"}, - {"shape":"InvalidStreamTypeException"}, - {"shape":"InvalidKMSResourceException"} - ], - "internalonly":true - }, "ListDeliveryStreams":{ "name":"ListDeliveryStreams", "http":{ @@ -199,24 +183,6 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ConcurrentModificationException"} ] - }, - "VerifyResourcesExistForTagris":{ - "name":"VerifyResourcesExistForTagris", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"TagrisVerifyResourcesExistInput"}, - "output":{"shape":"TagrisVerifyResourcesExistOutput"}, - "errors":[ - {"shape":"TagrisAccessDeniedException"}, - {"shape":"TagrisInternalServiceException"}, - {"shape":"TagrisInvalidArnException"}, - {"shape":"TagrisInvalidParameterException"}, - {"shape":"TagrisPartialResourcesExistResultsException"}, - {"shape":"TagrisThrottledException"} - ], - "internalonly":true } }, "shapes":{ @@ -226,10 +192,6 @@ "min":1, "pattern":"arn:.*" }, - "AccessKeyId":{ - "type":"string", - "sensitive":true - }, "AmazonOpenSearchServerlessBufferingHints":{ "type":"structure", "members":{ @@ -1044,21 +1006,6 @@ "min":0, "pattern":"^$|\\.[0-9a-z!\\-_.*'()]+" }, - "FirehoseSource":{"type":"string"}, - "GetKinesisStreamInput":{ - "type":"structure", - "required":["DeliveryStreamARN"], - "members":{ - "DeliveryStreamARN":{"shape":"DeliveryStreamARN"} - } - }, - "GetKinesisStreamOutput":{ - "type":"structure", - "members":{ - 
"KinesisStreamARN":{"shape":"KinesisStreamARN"}, - "CredentialsForReadingKinesisStream":{"shape":"SessionCredentials"} - } - }, "HECAcknowledgmentTimeoutInSeconds":{ "type":"integer", "max":600, @@ -1278,14 +1225,6 @@ }, "exception":true }, - "InvalidStreamTypeException":{ - "type":"structure", - "members":{ - "message":{"shape":"ErrorMessage"}, - "source":{"shape":"FirehoseSource"} - }, - "exception":true - }, "KMSEncryptionConfig":{ "type":"structure", "required":["AWSKMSKeyARN"], @@ -1874,10 +1813,6 @@ "VersionId":{"shape":"NonEmptyStringWithoutWhitespace"} } }, - "SecretAccessKey":{ - "type":"string", - "sensitive":true - }, "SecurityGroupIdList":{ "type":"list", "member":{"shape":"NonEmptyStringWithoutWhitespace"}, @@ -1899,26 +1834,6 @@ "exception":true, "fault":true }, - "SessionCredentials":{ - "type":"structure", - "required":[ - "AccessKeyId", - "SecretAccessKey", - "SessionToken", - "Expiration" - ], - "members":{ - "AccessKeyId":{"shape":"AccessKeyId"}, - "SecretAccessKey":{"shape":"SecretAccessKey"}, - "SessionToken":{"shape":"SessionToken"}, - "Expiration":{"shape":"Timestamp"} - }, - "sensitive":true - }, - "SessionToken":{ - "type":"string", - "sensitive":true - }, "SizeInMBs":{ "type":"integer", "max":128, @@ -2281,111 +2196,6 @@ "min":0, "pattern":"^[\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@%]*$" }, - "TagrisAccessDeniedException":{ - "type":"structure", - "members":{ - "message":{"shape":"TagrisExceptionMessage"} - }, - "exception":true - }, - "TagrisAccountId":{ - "type":"string", - "max":12, - "min":12 - }, - "TagrisAmazonResourceName":{ - "type":"string", - "max":1011, - "min":1 - }, - "TagrisExceptionMessage":{ - "type":"string", - "max":2048, - "min":0 - }, - "TagrisInternalId":{ - "type":"string", - "max":64, - "min":0 - }, - "TagrisInternalServiceException":{ - "type":"structure", - "members":{ - "message":{"shape":"TagrisExceptionMessage"} - }, - "exception":true, - "fault":true - }, - "TagrisInvalidArnException":{ - "type":"structure", - 
"members":{ - "message":{"shape":"TagrisExceptionMessage"}, - "sweepListItem":{"shape":"TagrisSweepListItem"} - }, - "exception":true - }, - "TagrisInvalidParameterException":{ - "type":"structure", - "members":{ - "message":{"shape":"TagrisExceptionMessage"} - }, - "exception":true - }, - "TagrisPartialResourcesExistResultsException":{ - "type":"structure", - "members":{ - "message":{"shape":"TagrisExceptionMessage"}, - "resourceExistenceInformation":{"shape":"TagrisSweepListResult"} - }, - "exception":true - }, - "TagrisStatus":{ - "type":"string", - "enum":[ - "ACTIVE", - "NOT_ACTIVE" - ] - }, - "TagrisSweepList":{ - "type":"list", - "member":{"shape":"TagrisSweepListItem"} - }, - "TagrisSweepListItem":{ - "type":"structure", - "members":{ - "TagrisAccountId":{"shape":"TagrisAccountId"}, - "TagrisAmazonResourceName":{"shape":"TagrisAmazonResourceName"}, - "TagrisInternalId":{"shape":"TagrisInternalId"}, - "TagrisVersion":{"shape":"TagrisVersion"} - } - }, - "TagrisSweepListResult":{ - "type":"map", - "key":{"shape":"TagrisAmazonResourceName"}, - "value":{"shape":"TagrisStatus"} - }, - "TagrisThrottledException":{ - "type":"structure", - "members":{ - "message":{"shape":"TagrisExceptionMessage"} - }, - "exception":true - }, - "TagrisVerifyResourcesExistInput":{ - "type":"structure", - "required":["TagrisSweepList"], - "members":{ - "TagrisSweepList":{"shape":"TagrisSweepList"} - } - }, - "TagrisVerifyResourcesExistOutput":{ - "type":"structure", - "required":["TagrisSweepListResult"], - "members":{ - "TagrisSweepListResult":{"shape":"TagrisSweepListResult"} - } - }, - "TagrisVersion":{"type":"long"}, "Timestamp":{"type":"timestamp"}, "TopicName":{ "type":"string", diff --git a/apis/firehose/2015-08-04/docs-2.json b/apis/firehose/2015-08-04/docs-2.json index 7c23feb1a32..4cbbe0db2c3 100644 --- a/apis/firehose/2015-08-04/docs-2.json +++ b/apis/firehose/2015-08-04/docs-2.json @@ -5,7 +5,6 @@ "CreateDeliveryStream": "

Creates a Firehose delivery stream.

By default, you can create up to 50 delivery streams per Amazon Web Services Region.

This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING. After the delivery stream is created, its status is ACTIVE and it now accepts data. If the delivery stream creation fails, the status transitions to CREATING_FAILED. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream.

If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.

A Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration parameter.

To create a delivery stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing delivery stream that doesn't have SSE enabled.

A delivery stream is configured with a single destination, such as Amazon Simple Storage Service (Amazon S3), Amazon Redshift, Amazon OpenSearch Service, Amazon OpenSearch Serverless, Splunk, and any custom HTTP endpoint or HTTP endpoints owned by or supported by third-party service providers, including Datadog, Dynatrace, LogicMonitor, MongoDB, New Relic, and Sumo Logic. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration, S3DestinationConfiguration, ElasticsearchDestinationConfiguration, RedshiftDestinationConfiguration, or SplunkDestinationConfiguration.

When you specify S3DestinationConfiguration, you can also provide the following optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no BufferingHints value is provided, Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.

A few notes about Amazon Redshift as a destination:

Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Firehose Access to an Amazon S3 Destination in the Amazon Firehose Developer Guide.

", "DeleteDeliveryStream": "

Deletes a delivery stream and its data.

You can delete a delivery stream only if it is in one of the following states: ACTIVE, DELETING, CREATING_FAILED, or DELETING_FAILED. You can't delete a delivery stream that is in the CREATING state. To check the state of a delivery stream, use DescribeDeliveryStream.

DeleteDeliveryStream is an asynchronous API. When an API request to DeleteDeliveryStream succeeds, the delivery stream is marked for deletion, and it goes into the DELETING state.While the delivery stream is in the DELETING state, the service might continue to accept records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, first stop any applications that are sending records before you delete a delivery stream.

Removal of a delivery stream that is in the DELETING state is a low priority operation for the service. A stream may remain in the DELETING state for several minutes. Therefore, as a best practice, applications should not wait for streams in the DELETING state to be removed.

", "DescribeDeliveryStream": "

Describes the specified delivery stream and its status. For example, after your delivery stream is created, call DescribeDeliveryStream to see whether the delivery stream is ACTIVE and therefore ready for data to be sent to it.

If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it. If the status is DELETING_FAILED, you can force deletion by invoking DeleteDeliveryStream again but with DeleteDeliveryStreamInput$AllowForceDelete set to true.

", - "GetKinesisStream": null, "ListDeliveryStreams": "

Lists your delivery streams in alphabetical order of their names.

The number of delivery streams might be too large to return using a single call to ListDeliveryStreams. You can limit the number of delivery streams returned, using the Limit parameter. To determine whether there are more delivery streams to list, check the value of HasMoreDeliveryStreams in the output. If there are more delivery streams to list, you can request them by calling this operation again and setting the ExclusiveStartDeliveryStreamName parameter to the name of the last delivery stream returned in the last call.

", "ListTagsForDeliveryStream": "

Lists the tags for the specified delivery stream. This operation has a limit of five transactions per second per account.

", "PutRecord": "

Writes a single data record into an Amazon Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers.

By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Firehose Limits.

Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.

You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on.

Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation.

If the PutRecord operation throws a ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations.

Data records sent to Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.

", @@ -14,8 +13,7 @@ "StopDeliveryStreamEncryption": "

Disables server-side encryption (SSE) for the delivery stream.

This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to DISABLING, and then to DISABLED. You can continue to read and write data to your stream while its status is DISABLING. It can take up to 5 seconds after the encryption status changes to DISABLED before all records written to the delivery stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

To check the encryption state of a delivery stream, use DescribeDeliveryStream.

If SSE is enabled using a customer managed CMK and then you invoke StopDeliveryStreamEncryption, Firehose schedules the related KMS grant for retirement and then retires it after it ensures that it is finished delivering records to the destination.

The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.

", "TagDeliveryStream": "

Adds or updates tags for the specified delivery stream. A tag is a key-value pair that you can define and assign to Amazon Web Services resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.

Each delivery stream can have up to 50 tags.

This operation has a limit of five transactions per second per account.

", "UntagDeliveryStream": "

Removes tags from the specified delivery stream. Removed tags are deleted, and you can't recover them after this operation successfully completes.

If you specify a tag that doesn't exist, the operation ignores it.

This operation has a limit of five transactions per second per account.

", - "UpdateDestination": "

Updates the specified destination of the specified delivery stream.

Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes.

Switching between Amazon OpenSearch Service and other services is not supported. For an Amazon OpenSearch Service destination, you can only update to another Amazon OpenSearch Service destination.

If the destination type is the same, Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained on the destination.

If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Firehose does not merge any parameters. In this case, all parameters must be specified.

Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId in the next call.

", - "VerifyResourcesExistForTagris": null + "UpdateDestination": "

Updates the specified destination of the specified delivery stream.

Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes.

Switching between Amazon OpenSearch Service and other services is not supported. For an Amazon OpenSearch Service destination, you can only update to another Amazon OpenSearch Service destination.

If the destination type is the same, Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained on the destination.

If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Firehose does not merge any parameters. In this case, all parameters must be specified.

Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId in the next call.

" }, "shapes": { "AWSKMSKeyARN": { @@ -26,12 +24,6 @@ "KMSEncryptionConfig$AWSKMSKeyARN": "

The Amazon Resource Name (ARN) of the encryption key. Must belong to the same Amazon Web Services Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

" } }, - "AccessKeyId": { - "base": null, - "refs": { - "SessionCredentials$AccessKeyId": null - } - }, "AmazonOpenSearchServerlessBufferingHints": { "base": "

Describes the buffering to perform before delivering data to the Serverless offering for Amazon OpenSearch Service destination.

", "refs": { @@ -421,8 +413,7 @@ "base": null, "refs": { "CreateDeliveryStreamOutput$DeliveryStreamARN": "

The ARN of the delivery stream.

", - "DeliveryStreamDescription$DeliveryStreamARN": "

The Amazon Resource Name (ARN) of the delivery stream. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

", - "GetKinesisStreamInput$DeliveryStreamARN": null + "DeliveryStreamDescription$DeliveryStreamARN": "

The Amazon Resource Name (ARN) of the delivery stream. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

" } }, "DeliveryStreamDescription": { @@ -688,7 +679,6 @@ "InvalidArgumentException$message": "

A message that provides information about the error.

", "InvalidKMSResourceException$message": null, "InvalidSourceException$message": null, - "InvalidStreamTypeException$message": null, "LimitExceededException$message": "

A message that provides information about the error.

", "PutRecordBatchResponseEntry$ErrorMessage": "

The error message for an individual record result.

", "ResourceInUseException$message": "

A message that provides information about the error.

", @@ -740,22 +730,6 @@ "ExtendedS3DestinationUpdate$FileExtension": "

Specify a file extension. It will override the default file extension

" } }, - "FirehoseSource": { - "base": null, - "refs": { - "InvalidStreamTypeException$source": null - } - }, - "GetKinesisStreamInput": { - "base": null, - "refs": { - } - }, - "GetKinesisStreamOutput": { - "base": null, - "refs": { - } - }, "HECAcknowledgmentTimeoutInSeconds": { "base": null, "refs": { @@ -946,11 +920,6 @@ "refs": { } }, - "InvalidStreamTypeException": { - "base": null, - "refs": { - } - }, "KMSEncryptionConfig": { "base": "

Describes an encryption key for a destination in Amazon S3.

", "refs": { @@ -967,7 +936,6 @@ "KinesisStreamARN": { "base": null, "refs": { - "GetKinesisStreamOutput$KinesisStreamARN": null, "KinesisStreamSourceConfiguration$KinesisStreamARN": "

The ARN of the source Kinesis data stream. For more information, see Amazon Kinesis Data Streams ARN Format.

", "KinesisStreamSourceDescription$KinesisStreamARN": "

The Amazon Resource Name (ARN) of the source Kinesis data stream. For more information, see Amazon Kinesis Data Streams ARN Format.

" } @@ -1477,12 +1445,6 @@ "DataFormatConversionConfiguration$SchemaConfiguration": "

Specifies the Amazon Web Services Glue Data Catalog table that contains the column information. This parameter is required if Enabled is set to true.

" } }, - "SecretAccessKey": { - "base": null, - "refs": { - "SessionCredentials$SecretAccessKey": null - } - }, "SecurityGroupIdList": { "base": null, "refs": { @@ -1501,18 +1463,6 @@ "refs": { } }, - "SessionCredentials": { - "base": null, - "refs": { - "GetKinesisStreamOutput$CredentialsForReadingKinesisStream": null - } - }, - "SessionToken": { - "base": null, - "refs": { - "SessionCredentials$SessionToken": null - } - }, "SizeInMBs": { "base": null, "refs": { @@ -1801,114 +1751,11 @@ "Tag$Value": "

An optional string, which you can use to describe or define the tag. Maximum length: 256 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % @

" } }, - "TagrisAccessDeniedException": { - "base": null, - "refs": { - } - }, - "TagrisAccountId": { - "base": null, - "refs": { - "TagrisSweepListItem$TagrisAccountId": null - } - }, - "TagrisAmazonResourceName": { - "base": null, - "refs": { - "TagrisSweepListItem$TagrisAmazonResourceName": null, - "TagrisSweepListResult$key": null - } - }, - "TagrisExceptionMessage": { - "base": null, - "refs": { - "TagrisAccessDeniedException$message": null, - "TagrisInternalServiceException$message": null, - "TagrisInvalidArnException$message": null, - "TagrisInvalidParameterException$message": null, - "TagrisPartialResourcesExistResultsException$message": null, - "TagrisThrottledException$message": null - } - }, - "TagrisInternalId": { - "base": null, - "refs": { - "TagrisSweepListItem$TagrisInternalId": null - } - }, - "TagrisInternalServiceException": { - "base": null, - "refs": { - } - }, - "TagrisInvalidArnException": { - "base": null, - "refs": { - } - }, - "TagrisInvalidParameterException": { - "base": null, - "refs": { - } - }, - "TagrisPartialResourcesExistResultsException": { - "base": null, - "refs": { - } - }, - "TagrisStatus": { - "base": null, - "refs": { - "TagrisSweepListResult$value": null - } - }, - "TagrisSweepList": { - "base": null, - "refs": { - "TagrisVerifyResourcesExistInput$TagrisSweepList": null - } - }, - "TagrisSweepListItem": { - "base": null, - "refs": { - "TagrisInvalidArnException$sweepListItem": null, - "TagrisSweepList$member": null - } - }, - "TagrisSweepListResult": { - "base": null, - "refs": { - "TagrisPartialResourcesExistResultsException$resourceExistenceInformation": null, - "TagrisVerifyResourcesExistOutput$TagrisSweepListResult": null - } - }, - "TagrisThrottledException": { - "base": null, - "refs": { - } - }, - "TagrisVerifyResourcesExistInput": { - "base": null, - "refs": { - } - }, - "TagrisVerifyResourcesExistOutput": { - "base": null, - "refs": { - } - }, - "TagrisVersion": { - "base": null, - "refs": { - 
"TagrisSweepListItem$TagrisVersion": null - } - }, "Timestamp": { "base": null, "refs": { "DeliveryStreamDescription$CreateTimestamp": "

The date and time that the delivery stream was created.

", - "DeliveryStreamDescription$LastUpdateTimestamp": "

The date and time that the delivery stream was last updated.

", - "SessionCredentials$Expiration": null + "DeliveryStreamDescription$LastUpdateTimestamp": "

The date and time that the delivery stream was last updated.

" } }, "TopicName": { diff --git a/apis/lambda/2015-03-31/api-2.json b/apis/lambda/2015-03-31/api-2.json index 42c394a4b6e..621ec829a69 100644 --- a/apis/lambda/2015-03-31/api-2.json +++ b/apis/lambda/2015-03-31/api-2.json @@ -3889,6 +3889,7 @@ "dotnetcore2.1", "dotnetcore3.1", "dotnet6", + "dotnet8", "nodejs4.3-edge", "go1.x", "ruby2.5", diff --git a/apis/lambda/2015-03-31/docs-2.json b/apis/lambda/2015-03-31/docs-2.json index 169d43ef657..044d945d823 100644 --- a/apis/lambda/2015-03-31/docs-2.json +++ b/apis/lambda/2015-03-31/docs-2.json @@ -502,7 +502,7 @@ "DestinationArn": { "base": null, "refs": { - "OnFailure$Destination": "

The Amazon Resource Name (ARN) of the destination resource.

To retain records of asynchronous invocations, you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.

To retain records of failed invocations from Kinesis and DynamoDB event sources, you can configure an Amazon SNS topic or Amazon SQS queue as the destination.

To retain records of failed invocations from self-managed Kafka or Amazon MSK, you can configure an Amazon SNS topic or Amazon SQS queue as the destination.

", + "OnFailure$Destination": "

The Amazon Resource Name (ARN) of the destination resource.

To retain records of asynchronous invocations, you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.

To retain records of failed invocations from Kinesis and DynamoDB event sources, you can configure an Amazon SNS topic or Amazon SQS queue as the destination.

To retain records of failed invocations from self-managed Kafka or Amazon MSK, you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination.

", "OnSuccess$Destination": "

The Amazon Resource Name (ARN) of the destination resource.

" } }, @@ -510,7 +510,7 @@ "base": "

A configuration object that specifies the destination of an event after Lambda processes it.

", "refs": { "CreateEventSourceMappingRequest$DestinationConfig": "

(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Kafka only) A configuration object that specifies the destination of an event after Lambda processes it.

", - "EventSourceMappingConfiguration$DestinationConfig": "

(Kinesis and DynamoDB Streams only) An Amazon SQS queue or Amazon SNS topic destination for discarded records.

", + "EventSourceMappingConfiguration$DestinationConfig": "

(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Apache Kafka event sources only) A configuration object that specifies the destination of an event after Lambda processes it.

", "FunctionEventInvokeConfig$DestinationConfig": "

A destination for events after they have been sent to a function for processing.

Destinations

", "PutFunctionEventInvokeConfigRequest$DestinationConfig": "

A destination for events after they have been sent to a function for processing.

Destinations

", "UpdateEventSourceMappingRequest$DestinationConfig": "

(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Kafka only) A configuration object that specifies the destination of an event after Lambda processes it.

", diff --git a/gems/aws-sdk-core/CHANGELOG.md b/gems/aws-sdk-core/CHANGELOG.md index d2c7beeafa6..597b9289d72 100644 --- a/gems/aws-sdk-core/CHANGELOG.md +++ b/gems/aws-sdk-core/CHANGELOG.md @@ -1,6 +1,9 @@ Unreleased Changes ------------------ +3.191.3 (2024-02-20) +------------------ + * Issue - Remove base64 as dependency. 3.191.2 (2024-02-14) diff --git a/gems/aws-sdk-core/VERSION b/gems/aws-sdk-core/VERSION index 8fffe10c80f..229989cb4ca 100644 --- a/gems/aws-sdk-core/VERSION +++ b/gems/aws-sdk-core/VERSION @@ -1 +1 @@ -3.191.2 +3.191.3 diff --git a/gems/aws-sdk-core/lib/aws-sdk-sso.rb b/gems/aws-sdk-core/lib/aws-sdk-sso.rb index 10478ed08cd..8077d9994a9 100644 --- a/gems/aws-sdk-core/lib/aws-sdk-sso.rb +++ b/gems/aws-sdk-core/lib/aws-sdk-sso.rb @@ -54,6 +54,6 @@ # @!group service module Aws::SSO - GEM_VERSION = '3.191.2' + GEM_VERSION = '3.191.3' end diff --git a/gems/aws-sdk-core/lib/aws-sdk-sso/client.rb b/gems/aws-sdk-core/lib/aws-sdk-sso/client.rb index 4503c83b476..6b04c0ca49d 100644 --- a/gems/aws-sdk-core/lib/aws-sdk-sso/client.rb +++ b/gems/aws-sdk-core/lib/aws-sdk-sso/client.rb @@ -605,7 +605,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-core' - context[:gem_version] = '3.191.2' + context[:gem_version] = '3.191.3' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-core/lib/aws-sdk-ssooidc.rb b/gems/aws-sdk-core/lib/aws-sdk-ssooidc.rb index ffa6f94d988..ac16c1b427b 100644 --- a/gems/aws-sdk-core/lib/aws-sdk-ssooidc.rb +++ b/gems/aws-sdk-core/lib/aws-sdk-ssooidc.rb @@ -54,6 +54,6 @@ # @!group service module Aws::SSOOIDC - GEM_VERSION = '3.191.2' + GEM_VERSION = '3.191.3' end diff --git a/gems/aws-sdk-core/lib/aws-sdk-ssooidc/client.rb b/gems/aws-sdk-core/lib/aws-sdk-ssooidc/client.rb index 874ad909b9b..e626619a381 100644 --- a/gems/aws-sdk-core/lib/aws-sdk-ssooidc/client.rb +++ b/gems/aws-sdk-core/lib/aws-sdk-ssooidc/client.rb @@ -910,7 +910,7 @@ def 
build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-core' - context[:gem_version] = '3.191.2' + context[:gem_version] = '3.191.3' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-core/lib/aws-sdk-sts.rb b/gems/aws-sdk-core/lib/aws-sdk-sts.rb index 17414040580..62e47accaa2 100644 --- a/gems/aws-sdk-core/lib/aws-sdk-sts.rb +++ b/gems/aws-sdk-core/lib/aws-sdk-sts.rb @@ -54,6 +54,6 @@ # @!group service module Aws::STS - GEM_VERSION = '3.191.2' + GEM_VERSION = '3.191.3' end diff --git a/gems/aws-sdk-core/lib/aws-sdk-sts/client.rb b/gems/aws-sdk-core/lib/aws-sdk-sts/client.rb index a1286a60b0f..c85fddff5ad 100644 --- a/gems/aws-sdk-core/lib/aws-sdk-sts/client.rb +++ b/gems/aws-sdk-core/lib/aws-sdk-sts/client.rb @@ -2352,7 +2352,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-core' - context[:gem_version] = '3.191.2' + context[:gem_version] = '3.191.3' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-dynamodb/CHANGELOG.md b/gems/aws-sdk-dynamodb/CHANGELOG.md index c1d38b7123f..67aff1b2f78 100644 --- a/gems/aws-sdk-dynamodb/CHANGELOG.md +++ b/gems/aws-sdk-dynamodb/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.104.0 (2024-02-20) +------------------ + +* Feature - Publishing quick fix for doc only update. 
+ 1.103.0 (2024-02-14) ------------------ diff --git a/gems/aws-sdk-dynamodb/VERSION b/gems/aws-sdk-dynamodb/VERSION index e402df2ddc9..9f0b4661158 100644 --- a/gems/aws-sdk-dynamodb/VERSION +++ b/gems/aws-sdk-dynamodb/VERSION @@ -1 +1 @@ -1.103.0 +1.104.0 diff --git a/gems/aws-sdk-dynamodb/aws-sdk-dynamodb.gemspec b/gems/aws-sdk-dynamodb/aws-sdk-dynamodb.gemspec index 1ffa080beed..6321e84970f 100644 --- a/gems/aws-sdk-dynamodb/aws-sdk-dynamodb.gemspec +++ b/gems/aws-sdk-dynamodb/aws-sdk-dynamodb.gemspec @@ -25,7 +25,6 @@ Gem::Specification.new do |spec| 'changelog_uri' => 'https://github.com/aws/aws-sdk-ruby/tree/version-3/gems/aws-sdk-dynamodb/CHANGELOG.md' } - spec.add_dependency('bigdecimal') spec.add_dependency('aws-sdk-core', '~> 3', '>= 3.191.0') spec.add_dependency('aws-sigv4', '~> 1.1') diff --git a/gems/aws-sdk-dynamodb/features/env.rb b/gems/aws-sdk-dynamodb/features/env.rb index 61992e39acd..8b71fe12114 100644 --- a/gems/aws-sdk-dynamodb/features/env.rb +++ b/gems/aws-sdk-dynamodb/features/env.rb @@ -9,7 +9,6 @@ $:.unshift(File.expand_path('../../lib', __FILE__)) $:.unshift(File.expand_path('../../../aws-sdk-core/features', __FILE__)) -$:.unshift(File.expand_path('../../../bigdecimal/lib', __FILE__)) $:.unshift(File.expand_path('../../../aws-sdk-core/lib', __FILE__)) $:.unshift(File.expand_path('../../../aws-sigv4/lib', __FILE__)) diff --git a/gems/aws-sdk-dynamodb/lib/aws-sdk-dynamodb.rb b/gems/aws-sdk-dynamodb/lib/aws-sdk-dynamodb.rb index e8c68c5badd..9a67933c3a6 100644 --- a/gems/aws-sdk-dynamodb/lib/aws-sdk-dynamodb.rb +++ b/gems/aws-sdk-dynamodb/lib/aws-sdk-dynamodb.rb @@ -8,7 +8,6 @@ # WARNING ABOUT GENERATED CODE -require 'bigdecimal' require 'aws-sdk-core' require 'aws-sigv4' @@ -55,6 +54,6 @@ # @!group service module Aws::DynamoDB - GEM_VERSION = '1.103.0' + GEM_VERSION = '1.104.0' end diff --git a/gems/aws-sdk-dynamodb/lib/aws-sdk-dynamodb/client.rb b/gems/aws-sdk-dynamodb/lib/aws-sdk-dynamodb/client.rb index 0ba24f8c566..9c4a2736daa 100644 
--- a/gems/aws-sdk-dynamodb/lib/aws-sdk-dynamodb/client.rb +++ b/gems/aws-sdk-dynamodb/lib/aws-sdk-dynamodb/client.rb @@ -7327,11 +7327,11 @@ def update_kinesis_streaming_destination(params = {}, options = {}) # begins backfilling, you can use `UpdateTable` to perform other # operations. # - # `UpdateTable` is an asynchronous operation; while it is executing, the - # table status changes from `ACTIVE` to `UPDATING`. While it is - # `UPDATING`, you cannot issue another `UpdateTable` request. When the - # table returns to the `ACTIVE` state, the `UpdateTable` operation is - # complete. + # `UpdateTable` is an asynchronous operation; while it's executing, the + # table status changes from `ACTIVE` to `UPDATING`. While it's + # `UPDATING`, you can't issue another `UpdateTable` request on the base + # table nor any replicas. When the table returns to the `ACTIVE` state, + # the `UpdateTable` operation is complete. # # # @@ -7924,7 +7924,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-dynamodb' - context[:gem_version] = '1.103.0' + context[:gem_version] = '1.104.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-dynamodb/spec/spec_helper.rb b/gems/aws-sdk-dynamodb/spec/spec_helper.rb index 50f410b0847..23832f81a8a 100644 --- a/gems/aws-sdk-dynamodb/spec/spec_helper.rb +++ b/gems/aws-sdk-dynamodb/spec/spec_helper.rb @@ -10,7 +10,6 @@ require_relative '../../aws-sdk-core/spec/shared_spec_helper' $:.unshift(File.expand_path('../../lib', __FILE__)) -$:.unshift(File.expand_path('../../../bigdecimal/lib', __FILE__)) $:.unshift(File.expand_path('../../../aws-sdk-core/lib', __FILE__)) $:.unshift(File.expand_path('../../../aws-sigv4/lib', __FILE__)) diff --git a/gems/aws-sdk-dynamodbstreams/CHANGELOG.md b/gems/aws-sdk-dynamodbstreams/CHANGELOG.md index 297d88f7f36..72774652027 100644 --- a/gems/aws-sdk-dynamodbstreams/CHANGELOG.md +++ b/gems/aws-sdk-dynamodbstreams/CHANGELOG.md @@ 
-1,6 +1,11 @@ Unreleased Changes ------------------ +1.55.0 (2024-02-20) +------------------ + +* Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details. + 1.54.0 (2024-02-14) ------------------ diff --git a/gems/aws-sdk-dynamodbstreams/VERSION b/gems/aws-sdk-dynamodbstreams/VERSION index b7921ae87bc..094d6ad00ce 100644 --- a/gems/aws-sdk-dynamodbstreams/VERSION +++ b/gems/aws-sdk-dynamodbstreams/VERSION @@ -1 +1 @@ -1.54.0 +1.55.0 diff --git a/gems/aws-sdk-dynamodbstreams/aws-sdk-dynamodbstreams.gemspec b/gems/aws-sdk-dynamodbstreams/aws-sdk-dynamodbstreams.gemspec index 1e9481d0520..9c6960b191c 100644 --- a/gems/aws-sdk-dynamodbstreams/aws-sdk-dynamodbstreams.gemspec +++ b/gems/aws-sdk-dynamodbstreams/aws-sdk-dynamodbstreams.gemspec @@ -25,7 +25,6 @@ Gem::Specification.new do |spec| 'changelog_uri' => 'https://github.com/aws/aws-sdk-ruby/tree/version-3/gems/aws-sdk-dynamodbstreams/CHANGELOG.md' } - spec.add_dependency('bigdecimal') spec.add_dependency('aws-sdk-core', '~> 3', '>= 3.191.0') spec.add_dependency('aws-sigv4', '~> 1.1') diff --git a/gems/aws-sdk-dynamodbstreams/features/env.rb b/gems/aws-sdk-dynamodbstreams/features/env.rb index 5174abdb177..25ac877b57a 100644 --- a/gems/aws-sdk-dynamodbstreams/features/env.rb +++ b/gems/aws-sdk-dynamodbstreams/features/env.rb @@ -9,7 +9,6 @@ $:.unshift(File.expand_path('../../lib', __FILE__)) $:.unshift(File.expand_path('../../../aws-sdk-core/features', __FILE__)) -$:.unshift(File.expand_path('../../../bigdecimal/lib', __FILE__)) $:.unshift(File.expand_path('../../../aws-sdk-core/lib', __FILE__)) $:.unshift(File.expand_path('../../../aws-sigv4/lib', __FILE__)) diff --git a/gems/aws-sdk-dynamodbstreams/lib/aws-sdk-dynamodbstreams.rb b/gems/aws-sdk-dynamodbstreams/lib/aws-sdk-dynamodbstreams.rb index cdba37a9a2e..4e58e156b97 100644 --- a/gems/aws-sdk-dynamodbstreams/lib/aws-sdk-dynamodbstreams.rb +++ b/gems/aws-sdk-dynamodbstreams/lib/aws-sdk-dynamodbstreams.rb @@ -8,7 +8,6 @@ 
# WARNING ABOUT GENERATED CODE -require 'bigdecimal' require 'aws-sdk-core' require 'aws-sigv4' @@ -53,6 +52,6 @@ # @!group service module Aws::DynamoDBStreams - GEM_VERSION = '1.54.0' + GEM_VERSION = '1.55.0' end diff --git a/gems/aws-sdk-dynamodbstreams/lib/aws-sdk-dynamodbstreams/client.rb b/gems/aws-sdk-dynamodbstreams/lib/aws-sdk-dynamodbstreams/client.rb index d16fae96fda..613f9bde142 100644 --- a/gems/aws-sdk-dynamodbstreams/lib/aws-sdk-dynamodbstreams/client.rb +++ b/gems/aws-sdk-dynamodbstreams/lib/aws-sdk-dynamodbstreams/client.rb @@ -854,7 +854,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-dynamodbstreams' - context[:gem_version] = '1.54.0' + context[:gem_version] = '1.55.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-dynamodbstreams/spec/spec_helper.rb b/gems/aws-sdk-dynamodbstreams/spec/spec_helper.rb index b2302b01a04..eef3c6cd1fe 100644 --- a/gems/aws-sdk-dynamodbstreams/spec/spec_helper.rb +++ b/gems/aws-sdk-dynamodbstreams/spec/spec_helper.rb @@ -10,7 +10,6 @@ require_relative '../../aws-sdk-core/spec/shared_spec_helper' $:.unshift(File.expand_path('../../lib', __FILE__)) -$:.unshift(File.expand_path('../../../bigdecimal/lib', __FILE__)) $:.unshift(File.expand_path('../../../aws-sdk-core/lib', __FILE__)) $:.unshift(File.expand_path('../../../aws-sigv4/lib', __FILE__)) diff --git a/gems/aws-sdk-firehose/CHANGELOG.md b/gems/aws-sdk-firehose/CHANGELOG.md index d202d360d93..672cda3294d 100644 --- a/gems/aws-sdk-firehose/CHANGELOG.md +++ b/gems/aws-sdk-firehose/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.65.0 (2024-02-20) +------------------ + +* Feature - This release updates a few Firehose related APIs. 
+ 1.64.0 (2024-02-16) ------------------ diff --git a/gems/aws-sdk-firehose/VERSION b/gems/aws-sdk-firehose/VERSION index 9405730420f..902c74186fb 100644 --- a/gems/aws-sdk-firehose/VERSION +++ b/gems/aws-sdk-firehose/VERSION @@ -1 +1 @@ -1.64.0 +1.65.0 diff --git a/gems/aws-sdk-firehose/lib/aws-sdk-firehose.rb b/gems/aws-sdk-firehose/lib/aws-sdk-firehose.rb index c2a93f3a11e..ca1198f3340 100644 --- a/gems/aws-sdk-firehose/lib/aws-sdk-firehose.rb +++ b/gems/aws-sdk-firehose/lib/aws-sdk-firehose.rb @@ -52,6 +52,6 @@ # @!group service module Aws::Firehose - GEM_VERSION = '1.64.0' + GEM_VERSION = '1.65.0' end diff --git a/gems/aws-sdk-firehose/lib/aws-sdk-firehose/client.rb b/gems/aws-sdk-firehose/lib/aws-sdk-firehose/client.rb index 323f4da123a..90fd6cd9fba 100644 --- a/gems/aws-sdk-firehose/lib/aws-sdk-firehose/client.rb +++ b/gems/aws-sdk-firehose/lib/aws-sdk-firehose/client.rb @@ -1651,36 +1651,6 @@ def describe_delivery_stream(params = {}, options = {}) req.send_request(options) end - # @option params [required, String] :delivery_stream_arn - # - # @return [Types::GetKinesisStreamOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: - # - # * {Types::GetKinesisStreamOutput#kinesis_stream_arn #kinesis_stream_arn} => String - # * {Types::GetKinesisStreamOutput#credentials_for_reading_kinesis_stream #credentials_for_reading_kinesis_stream} => Types::SessionCredentials - # - # @example Request syntax with placeholder values - # - # resp = client.get_kinesis_stream({ - # delivery_stream_arn: "DeliveryStreamARN", # required - # }) - # - # @example Response structure - # - # resp.kinesis_stream_arn #=> String - # resp.credentials_for_reading_kinesis_stream.access_key_id #=> String - # resp.credentials_for_reading_kinesis_stream.secret_access_key #=> String - # resp.credentials_for_reading_kinesis_stream.session_token #=> String - # resp.credentials_for_reading_kinesis_stream.expiration #=> Time - # - # @see 
http://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/GetKinesisStream AWS API Documentation - # - # @overload get_kinesis_stream(params = {}) - # @param [Hash] params ({}) - def get_kinesis_stream(params = {}, options = {}) - req = build_request(:get_kinesis_stream, params) - req.send_request(options) - end - # Lists your delivery streams in alphabetical order of their names. # # The number of delivery streams might be too large to return using a @@ -2881,39 +2851,6 @@ def update_destination(params = {}, options = {}) req.send_request(options) end - # @option params [required, Array] :tagris_sweep_list - # - # @return [Types::TagrisVerifyResourcesExistOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods: - # - # * {Types::TagrisVerifyResourcesExistOutput#tagris_sweep_list_result #tagris_sweep_list_result} => Hash<String,String> - # - # @example Request syntax with placeholder values - # - # resp = client.verify_resources_exist_for_tagris({ - # tagris_sweep_list: [ # required - # { - # tagris_account_id: "TagrisAccountId", - # tagris_amazon_resource_name: "TagrisAmazonResourceName", - # tagris_internal_id: "TagrisInternalId", - # tagris_version: 1, - # }, - # ], - # }) - # - # @example Response structure - # - # resp.tagris_sweep_list_result #=> Hash - # resp.tagris_sweep_list_result["TagrisAmazonResourceName"] #=> String, one of "ACTIVE", "NOT_ACTIVE" - # - # @see http://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/VerifyResourcesExistForTagris AWS API Documentation - # - # @overload verify_resources_exist_for_tagris(params = {}) - # @param [Hash] params ({}) - def verify_resources_exist_for_tagris(params = {}, options = {}) - req = build_request(:verify_resources_exist_for_tagris, params) - req.send_request(options) - end - # @!endgroup # @param params ({}) @@ -2927,7 +2864,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-firehose' - 
context[:gem_version] = '1.64.0' + context[:gem_version] = '1.65.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-firehose/lib/aws-sdk-firehose/client_api.rb b/gems/aws-sdk-firehose/lib/aws-sdk-firehose/client_api.rb index 7ac9d73f7c1..128e36bbd82 100644 --- a/gems/aws-sdk-firehose/lib/aws-sdk-firehose/client_api.rb +++ b/gems/aws-sdk-firehose/lib/aws-sdk-firehose/client_api.rb @@ -14,7 +14,6 @@ module ClientApi include Seahorse::Model AWSKMSKeyARN = Shapes::StringShape.new(name: 'AWSKMSKeyARN') - AccessKeyId = Shapes::StringShape.new(name: 'AccessKeyId') AmazonOpenSearchServerlessBufferingHints = Shapes::StructureShape.new(name: 'AmazonOpenSearchServerlessBufferingHints') AmazonOpenSearchServerlessBufferingIntervalInSeconds = Shapes::IntegerShape.new(name: 'AmazonOpenSearchServerlessBufferingIntervalInSeconds') AmazonOpenSearchServerlessBufferingSizeInMBs = Shapes::IntegerShape.new(name: 'AmazonOpenSearchServerlessBufferingSizeInMBs') @@ -108,9 +107,6 @@ module ClientApi ExtendedS3DestinationUpdate = Shapes::StructureShape.new(name: 'ExtendedS3DestinationUpdate') FailureDescription = Shapes::StructureShape.new(name: 'FailureDescription') FileExtension = Shapes::StringShape.new(name: 'FileExtension') - FirehoseSource = Shapes::StringShape.new(name: 'FirehoseSource') - GetKinesisStreamInput = Shapes::StructureShape.new(name: 'GetKinesisStreamInput') - GetKinesisStreamOutput = Shapes::StructureShape.new(name: 'GetKinesisStreamOutput') HECAcknowledgmentTimeoutInSeconds = Shapes::IntegerShape.new(name: 'HECAcknowledgmentTimeoutInSeconds') HECEndpoint = Shapes::StringShape.new(name: 'HECEndpoint') HECEndpointType = Shapes::StringShape.new(name: 'HECEndpointType') @@ -140,7 +136,6 @@ module ClientApi InvalidArgumentException = Shapes::StructureShape.new(name: 'InvalidArgumentException') InvalidKMSResourceException = Shapes::StructureShape.new(name: 'InvalidKMSResourceException') InvalidSourceException = Shapes::StructureShape.new(name: 
'InvalidSourceException') - InvalidStreamTypeException = Shapes::StructureShape.new(name: 'InvalidStreamTypeException') KMSEncryptionConfig = Shapes::StructureShape.new(name: 'KMSEncryptionConfig') KeyType = Shapes::StringShape.new(name: 'KeyType') KinesisStreamARN = Shapes::StringShape.new(name: 'KinesisStreamARN') @@ -212,12 +207,9 @@ module ClientApi S3DestinationDescription = Shapes::StructureShape.new(name: 'S3DestinationDescription') S3DestinationUpdate = Shapes::StructureShape.new(name: 'S3DestinationUpdate') SchemaConfiguration = Shapes::StructureShape.new(name: 'SchemaConfiguration') - SecretAccessKey = Shapes::StringShape.new(name: 'SecretAccessKey') SecurityGroupIdList = Shapes::ListShape.new(name: 'SecurityGroupIdList') Serializer = Shapes::StructureShape.new(name: 'Serializer') ServiceUnavailableException = Shapes::StructureShape.new(name: 'ServiceUnavailableException') - SessionCredentials = Shapes::StructureShape.new(name: 'SessionCredentials') - SessionToken = Shapes::StringShape.new(name: 'SessionToken') SizeInMBs = Shapes::IntegerShape.new(name: 'SizeInMBs') SnowflakeAccountUrl = Shapes::StringShape.new(name: 'SnowflakeAccountUrl') SnowflakeContentColumnName = Shapes::StringShape.new(name: 'SnowflakeContentColumnName') @@ -261,23 +253,6 @@ module ClientApi TagKey = Shapes::StringShape.new(name: 'TagKey') TagKeyList = Shapes::ListShape.new(name: 'TagKeyList') TagValue = Shapes::StringShape.new(name: 'TagValue') - TagrisAccessDeniedException = Shapes::StructureShape.new(name: 'TagrisAccessDeniedException') - TagrisAccountId = Shapes::StringShape.new(name: 'TagrisAccountId') - TagrisAmazonResourceName = Shapes::StringShape.new(name: 'TagrisAmazonResourceName') - TagrisExceptionMessage = Shapes::StringShape.new(name: 'TagrisExceptionMessage') - TagrisInternalId = Shapes::StringShape.new(name: 'TagrisInternalId') - TagrisInternalServiceException = Shapes::StructureShape.new(name: 'TagrisInternalServiceException') - TagrisInvalidArnException = 
Shapes::StructureShape.new(name: 'TagrisInvalidArnException') - TagrisInvalidParameterException = Shapes::StructureShape.new(name: 'TagrisInvalidParameterException') - TagrisPartialResourcesExistResultsException = Shapes::StructureShape.new(name: 'TagrisPartialResourcesExistResultsException') - TagrisStatus = Shapes::StringShape.new(name: 'TagrisStatus') - TagrisSweepList = Shapes::ListShape.new(name: 'TagrisSweepList') - TagrisSweepListItem = Shapes::StructureShape.new(name: 'TagrisSweepListItem') - TagrisSweepListResult = Shapes::MapShape.new(name: 'TagrisSweepListResult') - TagrisThrottledException = Shapes::StructureShape.new(name: 'TagrisThrottledException') - TagrisVerifyResourcesExistInput = Shapes::StructureShape.new(name: 'TagrisVerifyResourcesExistInput') - TagrisVerifyResourcesExistOutput = Shapes::StructureShape.new(name: 'TagrisVerifyResourcesExistOutput') - TagrisVersion = Shapes::IntegerShape.new(name: 'TagrisVersion') Timestamp = Shapes::TimestampShape.new(name: 'Timestamp') TopicName = Shapes::StringShape.new(name: 'TopicName') UntagDeliveryStreamInput = Shapes::StructureShape.new(name: 'UntagDeliveryStreamInput') @@ -609,13 +584,6 @@ module ClientApi FailureDescription.add_member(:details, Shapes::ShapeRef.new(shape: NonEmptyString, required: true, location_name: "Details")) FailureDescription.struct_class = Types::FailureDescription - GetKinesisStreamInput.add_member(:delivery_stream_arn, Shapes::ShapeRef.new(shape: DeliveryStreamARN, required: true, location_name: "DeliveryStreamARN")) - GetKinesisStreamInput.struct_class = Types::GetKinesisStreamInput - - GetKinesisStreamOutput.add_member(:kinesis_stream_arn, Shapes::ShapeRef.new(shape: KinesisStreamARN, location_name: "KinesisStreamARN")) - GetKinesisStreamOutput.add_member(:credentials_for_reading_kinesis_stream, Shapes::ShapeRef.new(shape: SessionCredentials, location_name: "CredentialsForReadingKinesisStream")) - GetKinesisStreamOutput.struct_class = Types::GetKinesisStreamOutput - 
HiveJsonSerDe.add_member(:timestamp_formats, Shapes::ShapeRef.new(shape: ListOfNonEmptyStrings, location_name: "TimestampFormats")) HiveJsonSerDe.struct_class = Types::HiveJsonSerDe @@ -692,10 +660,6 @@ module ClientApi InvalidSourceException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessage, location_name: "message")) InvalidSourceException.struct_class = Types::InvalidSourceException - InvalidStreamTypeException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessage, location_name: "message")) - InvalidStreamTypeException.add_member(:source, Shapes::ShapeRef.new(shape: FirehoseSource, location_name: "source")) - InvalidStreamTypeException.struct_class = Types::InvalidStreamTypeException - KMSEncryptionConfig.add_member(:awskms_key_arn, Shapes::ShapeRef.new(shape: AWSKMSKeyARN, required: true, location_name: "AWSKMSKeyARN")) KMSEncryptionConfig.struct_class = Types::KMSEncryptionConfig @@ -916,12 +880,6 @@ module ClientApi ServiceUnavailableException.add_member(:message, Shapes::ShapeRef.new(shape: ErrorMessage, location_name: "message")) ServiceUnavailableException.struct_class = Types::ServiceUnavailableException - SessionCredentials.add_member(:access_key_id, Shapes::ShapeRef.new(shape: AccessKeyId, required: true, location_name: "AccessKeyId")) - SessionCredentials.add_member(:secret_access_key, Shapes::ShapeRef.new(shape: SecretAccessKey, required: true, location_name: "SecretAccessKey")) - SessionCredentials.add_member(:session_token, Shapes::ShapeRef.new(shape: SessionToken, required: true, location_name: "SessionToken")) - SessionCredentials.add_member(:expiration, Shapes::ShapeRef.new(shape: Timestamp, required: true, location_name: "Expiration")) - SessionCredentials.struct_class = Types::SessionCredentials - SnowflakeDestinationConfiguration.add_member(:account_url, Shapes::ShapeRef.new(shape: SnowflakeAccountUrl, required: true, location_name: "AccountUrl")) SnowflakeDestinationConfiguration.add_member(:private_key, 
Shapes::ShapeRef.new(shape: SnowflakePrivateKey, required: true, location_name: "PrivateKey")) SnowflakeDestinationConfiguration.add_member(:key_passphrase, Shapes::ShapeRef.new(shape: SnowflakeKeyPassphrase, location_name: "KeyPassphrase")) @@ -1063,43 +1021,6 @@ module ClientApi TagKeyList.member = Shapes::ShapeRef.new(shape: TagKey) - TagrisAccessDeniedException.add_member(:message, Shapes::ShapeRef.new(shape: TagrisExceptionMessage, location_name: "message")) - TagrisAccessDeniedException.struct_class = Types::TagrisAccessDeniedException - - TagrisInternalServiceException.add_member(:message, Shapes::ShapeRef.new(shape: TagrisExceptionMessage, location_name: "message")) - TagrisInternalServiceException.struct_class = Types::TagrisInternalServiceException - - TagrisInvalidArnException.add_member(:message, Shapes::ShapeRef.new(shape: TagrisExceptionMessage, location_name: "message")) - TagrisInvalidArnException.add_member(:sweep_list_item, Shapes::ShapeRef.new(shape: TagrisSweepListItem, location_name: "sweepListItem")) - TagrisInvalidArnException.struct_class = Types::TagrisInvalidArnException - - TagrisInvalidParameterException.add_member(:message, Shapes::ShapeRef.new(shape: TagrisExceptionMessage, location_name: "message")) - TagrisInvalidParameterException.struct_class = Types::TagrisInvalidParameterException - - TagrisPartialResourcesExistResultsException.add_member(:message, Shapes::ShapeRef.new(shape: TagrisExceptionMessage, location_name: "message")) - TagrisPartialResourcesExistResultsException.add_member(:resource_existence_information, Shapes::ShapeRef.new(shape: TagrisSweepListResult, location_name: "resourceExistenceInformation")) - TagrisPartialResourcesExistResultsException.struct_class = Types::TagrisPartialResourcesExistResultsException - - TagrisSweepList.member = Shapes::ShapeRef.new(shape: TagrisSweepListItem) - - TagrisSweepListItem.add_member(:tagris_account_id, Shapes::ShapeRef.new(shape: TagrisAccountId, location_name: "TagrisAccountId")) 
- TagrisSweepListItem.add_member(:tagris_amazon_resource_name, Shapes::ShapeRef.new(shape: TagrisAmazonResourceName, location_name: "TagrisAmazonResourceName")) - TagrisSweepListItem.add_member(:tagris_internal_id, Shapes::ShapeRef.new(shape: TagrisInternalId, location_name: "TagrisInternalId")) - TagrisSweepListItem.add_member(:tagris_version, Shapes::ShapeRef.new(shape: TagrisVersion, location_name: "TagrisVersion")) - TagrisSweepListItem.struct_class = Types::TagrisSweepListItem - - TagrisSweepListResult.key = Shapes::ShapeRef.new(shape: TagrisAmazonResourceName) - TagrisSweepListResult.value = Shapes::ShapeRef.new(shape: TagrisStatus) - - TagrisThrottledException.add_member(:message, Shapes::ShapeRef.new(shape: TagrisExceptionMessage, location_name: "message")) - TagrisThrottledException.struct_class = Types::TagrisThrottledException - - TagrisVerifyResourcesExistInput.add_member(:tagris_sweep_list, Shapes::ShapeRef.new(shape: TagrisSweepList, required: true, location_name: "TagrisSweepList")) - TagrisVerifyResourcesExistInput.struct_class = Types::TagrisVerifyResourcesExistInput - - TagrisVerifyResourcesExistOutput.add_member(:tagris_sweep_list_result, Shapes::ShapeRef.new(shape: TagrisSweepListResult, required: true, location_name: "TagrisSweepListResult")) - TagrisVerifyResourcesExistOutput.struct_class = Types::TagrisVerifyResourcesExistOutput - UntagDeliveryStreamInput.add_member(:delivery_stream_name, Shapes::ShapeRef.new(shape: DeliveryStreamName, required: true, location_name: "DeliveryStreamName")) UntagDeliveryStreamInput.add_member(:tag_keys, Shapes::ShapeRef.new(shape: TagKeyList, required: true, location_name: "TagKeys")) UntagDeliveryStreamInput.struct_class = Types::UntagDeliveryStreamInput @@ -1183,18 +1104,6 @@ module ClientApi o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) end) - api.add_operation(:get_kinesis_stream, Seahorse::Model::Operation.new.tap do |o| - o.name = "GetKinesisStream" - o.http_method = "POST" - 
o.http_request_uri = "/" - o.input = Shapes::ShapeRef.new(shape: GetKinesisStreamInput) - o.output = Shapes::ShapeRef.new(shape: GetKinesisStreamOutput) - o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) - o.errors << Shapes::ShapeRef.new(shape: InvalidArgumentException) - o.errors << Shapes::ShapeRef.new(shape: InvalidStreamTypeException) - o.errors << Shapes::ShapeRef.new(shape: InvalidKMSResourceException) - end) - api.add_operation(:list_delivery_streams, Seahorse::Model::Operation.new.tap do |o| o.name = "ListDeliveryStreams" o.http_method = "POST" @@ -1300,20 +1209,6 @@ module ClientApi o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) o.errors << Shapes::ShapeRef.new(shape: ConcurrentModificationException) end) - - api.add_operation(:verify_resources_exist_for_tagris, Seahorse::Model::Operation.new.tap do |o| - o.name = "VerifyResourcesExistForTagris" - o.http_method = "POST" - o.http_request_uri = "/" - o.input = Shapes::ShapeRef.new(shape: TagrisVerifyResourcesExistInput) - o.output = Shapes::ShapeRef.new(shape: TagrisVerifyResourcesExistOutput) - o.errors << Shapes::ShapeRef.new(shape: TagrisAccessDeniedException) - o.errors << Shapes::ShapeRef.new(shape: TagrisInternalServiceException) - o.errors << Shapes::ShapeRef.new(shape: TagrisInvalidArnException) - o.errors << Shapes::ShapeRef.new(shape: TagrisInvalidParameterException) - o.errors << Shapes::ShapeRef.new(shape: TagrisPartialResourcesExistResultsException) - o.errors << Shapes::ShapeRef.new(shape: TagrisThrottledException) - end) end end diff --git a/gems/aws-sdk-firehose/lib/aws-sdk-firehose/endpoints.rb b/gems/aws-sdk-firehose/lib/aws-sdk-firehose/endpoints.rb index 604b56cb3cd..b337c007cb7 100644 --- a/gems/aws-sdk-firehose/lib/aws-sdk-firehose/endpoints.rb +++ b/gems/aws-sdk-firehose/lib/aws-sdk-firehose/endpoints.rb @@ -54,20 +54,6 @@ def self.build(context) end end - class GetKinesisStream - def self.build(context) - unless context.config.regional_endpoint - 
endpoint = context.config.endpoint.to_s - end - Aws::Firehose::EndpointParameters.new( - region: context.config.region, - use_dual_stack: context.config.use_dualstack_endpoint, - use_fips: context.config.use_fips_endpoint, - endpoint: endpoint, - ) - end - end - class ListDeliveryStreams def self.build(context) unless context.config.regional_endpoint @@ -194,19 +180,5 @@ def self.build(context) end end - class VerifyResourcesExistForTagris - def self.build(context) - unless context.config.regional_endpoint - endpoint = context.config.endpoint.to_s - end - Aws::Firehose::EndpointParameters.new( - region: context.config.region, - use_dual_stack: context.config.use_dualstack_endpoint, - use_fips: context.config.use_fips_endpoint, - endpoint: endpoint, - ) - end - end - end end diff --git a/gems/aws-sdk-firehose/lib/aws-sdk-firehose/errors.rb b/gems/aws-sdk-firehose/lib/aws-sdk-firehose/errors.rb index eaa4917d31a..256fb63d303 100644 --- a/gems/aws-sdk-firehose/lib/aws-sdk-firehose/errors.rb +++ b/gems/aws-sdk-firehose/lib/aws-sdk-firehose/errors.rb @@ -31,17 +31,10 @@ module Aws::Firehose # * {InvalidArgumentException} # * {InvalidKMSResourceException} # * {InvalidSourceException} - # * {InvalidStreamTypeException} # * {LimitExceededException} # * {ResourceInUseException} # * {ResourceNotFoundException} # * {ServiceUnavailableException} - # * {TagrisAccessDeniedException} - # * {TagrisInternalServiceException} - # * {TagrisInvalidArnException} - # * {TagrisInvalidParameterException} - # * {TagrisPartialResourcesExistResultsException} - # * {TagrisThrottledException} # # Additionally, error classes are dynamically generated for service errors based on the error code # if they are not defined above. 
@@ -119,26 +112,6 @@ def message end end - class InvalidStreamTypeException < ServiceError - - # @param [Seahorse::Client::RequestContext] context - # @param [String] message - # @param [Aws::Firehose::Types::InvalidStreamTypeException] data - def initialize(context, message, data = Aws::EmptyStructure.new) - super(context, message, data) - end - - # @return [String] - def message - @message || @data[:message] - end - - # @return [String] - def source - @data[:source] - end - end - class LimitExceededException < ServiceError # @param [Seahorse::Client::RequestContext] context @@ -199,105 +172,5 @@ def message end end - class TagrisAccessDeniedException < ServiceError - - # @param [Seahorse::Client::RequestContext] context - # @param [String] message - # @param [Aws::Firehose::Types::TagrisAccessDeniedException] data - def initialize(context, message, data = Aws::EmptyStructure.new) - super(context, message, data) - end - - # @return [String] - def message - @message || @data[:message] - end - end - - class TagrisInternalServiceException < ServiceError - - # @param [Seahorse::Client::RequestContext] context - # @param [String] message - # @param [Aws::Firehose::Types::TagrisInternalServiceException] data - def initialize(context, message, data = Aws::EmptyStructure.new) - super(context, message, data) - end - - # @return [String] - def message - @message || @data[:message] - end - end - - class TagrisInvalidArnException < ServiceError - - # @param [Seahorse::Client::RequestContext] context - # @param [String] message - # @param [Aws::Firehose::Types::TagrisInvalidArnException] data - def initialize(context, message, data = Aws::EmptyStructure.new) - super(context, message, data) - end - - # @return [String] - def message - @message || @data[:message] - end - - # @return [String] - def sweep_list_item - @data[:sweep_list_item] - end - end - - class TagrisInvalidParameterException < ServiceError - - # @param [Seahorse::Client::RequestContext] context - # @param 
[String] message - # @param [Aws::Firehose::Types::TagrisInvalidParameterException] data - def initialize(context, message, data = Aws::EmptyStructure.new) - super(context, message, data) - end - - # @return [String] - def message - @message || @data[:message] - end - end - - class TagrisPartialResourcesExistResultsException < ServiceError - - # @param [Seahorse::Client::RequestContext] context - # @param [String] message - # @param [Aws::Firehose::Types::TagrisPartialResourcesExistResultsException] data - def initialize(context, message, data = Aws::EmptyStructure.new) - super(context, message, data) - end - - # @return [String] - def message - @message || @data[:message] - end - - # @return [String] - def resource_existence_information - @data[:resource_existence_information] - end - end - - class TagrisThrottledException < ServiceError - - # @param [Seahorse::Client::RequestContext] context - # @param [String] message - # @param [Aws::Firehose::Types::TagrisThrottledException] data - def initialize(context, message, data = Aws::EmptyStructure.new) - super(context, message, data) - end - - # @return [String] - def message - @message || @data[:message] - end - end - end end diff --git a/gems/aws-sdk-firehose/lib/aws-sdk-firehose/plugins/endpoints.rb b/gems/aws-sdk-firehose/lib/aws-sdk-firehose/plugins/endpoints.rb index 91e0872461d..637f26f52bb 100644 --- a/gems/aws-sdk-firehose/lib/aws-sdk-firehose/plugins/endpoints.rb +++ b/gems/aws-sdk-firehose/lib/aws-sdk-firehose/plugins/endpoints.rb @@ -64,8 +64,6 @@ def parameters_for_operation(context) Aws::Firehose::Endpoints::DeleteDeliveryStream.build(context) when :describe_delivery_stream Aws::Firehose::Endpoints::DescribeDeliveryStream.build(context) - when :get_kinesis_stream - Aws::Firehose::Endpoints::GetKinesisStream.build(context) when :list_delivery_streams Aws::Firehose::Endpoints::ListDeliveryStreams.build(context) when :list_tags_for_delivery_stream @@ -84,8 +82,6 @@ def parameters_for_operation(context) 
Aws::Firehose::Endpoints::UntagDeliveryStream.build(context) when :update_destination Aws::Firehose::Endpoints::UpdateDestination.build(context) - when :verify_resources_exist_for_tagris - Aws::Firehose::Endpoints::VerifyResourcesExistForTagris.build(context) end end end diff --git a/gems/aws-sdk-firehose/lib/aws-sdk-firehose/types.rb b/gems/aws-sdk-firehose/lib/aws-sdk-firehose/types.rb index 8e26555a22f..28ae7ab1462 100644 --- a/gems/aws-sdk-firehose/lib/aws-sdk-firehose/types.rb +++ b/gems/aws-sdk-firehose/lib/aws-sdk-firehose/types.rb @@ -2095,32 +2095,6 @@ class FailureDescription < Struct.new( include Aws::Structure end - # @!attribute [rw] delivery_stream_arn - # @return [String] - # - # @see http://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/GetKinesisStreamInput AWS API Documentation - # - class GetKinesisStreamInput < Struct.new( - :delivery_stream_arn) - SENSITIVE = [] - include Aws::Structure - end - - # @!attribute [rw] kinesis_stream_arn - # @return [String] - # - # @!attribute [rw] credentials_for_reading_kinesis_stream - # @return [Types::SessionCredentials] - # - # @see http://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/GetKinesisStreamOutput AWS API Documentation - # - class GetKinesisStreamOutput < Struct.new( - :kinesis_stream_arn, - :credentials_for_reading_kinesis_stream) - SENSITIVE = [:credentials_for_reading_kinesis_stream] - include Aws::Structure - end - # The native Hive / HCatalog JsonSerDe. Used by Firehose for # deserializing data, which means converting it from the JSON format in # preparation for serializing it to the Parquet or ORC format. 
This is @@ -2579,21 +2553,6 @@ class InvalidSourceException < Struct.new( include Aws::Structure end - # @!attribute [rw] message - # @return [String] - # - # @!attribute [rw] source - # @return [String] - # - # @see http://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/InvalidStreamTypeException AWS API Documentation - # - class InvalidStreamTypeException < Struct.new( - :message, - :source) - SENSITIVE = [] - include Aws::Structure - end - # Describes an encryption key for a destination in Amazon S3. # # @!attribute [rw] awskms_key_arn @@ -3896,29 +3855,6 @@ class ServiceUnavailableException < Struct.new( include Aws::Structure end - # @!attribute [rw] access_key_id - # @return [String] - # - # @!attribute [rw] secret_access_key - # @return [String] - # - # @!attribute [rw] session_token - # @return [String] - # - # @!attribute [rw] expiration - # @return [Time] - # - # @see http://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/SessionCredentials AWS API Documentation - # - class SessionCredentials < Struct.new( - :access_key_id, - :secret_access_key, - :session_token, - :expiration) - SENSITIVE = [:access_key_id, :secret_access_key, :session_token] - include Aws::Structure - end - # Configure Snowflake destination # # @!attribute [rw] account_url @@ -4736,125 +4672,6 @@ class TagDeliveryStreamInput < Struct.new( # class TagDeliveryStreamOutput < Aws::EmptyStructure; end - # @!attribute [rw] message - # @return [String] - # - # @see http://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/TagrisAccessDeniedException AWS API Documentation - # - class TagrisAccessDeniedException < Struct.new( - :message) - SENSITIVE = [] - include Aws::Structure - end - - # @!attribute [rw] message - # @return [String] - # - # @see http://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/TagrisInternalServiceException AWS API Documentation - # - class TagrisInternalServiceException < Struct.new( - :message) - SENSITIVE = [] - include Aws::Structure - end - - # 
@!attribute [rw] message - # @return [String] - # - # @!attribute [rw] sweep_list_item - # @return [Types::TagrisSweepListItem] - # - # @see http://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/TagrisInvalidArnException AWS API Documentation - # - class TagrisInvalidArnException < Struct.new( - :message, - :sweep_list_item) - SENSITIVE = [] - include Aws::Structure - end - - # @!attribute [rw] message - # @return [String] - # - # @see http://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/TagrisInvalidParameterException AWS API Documentation - # - class TagrisInvalidParameterException < Struct.new( - :message) - SENSITIVE = [] - include Aws::Structure - end - - # @!attribute [rw] message - # @return [String] - # - # @!attribute [rw] resource_existence_information - # @return [Hash] - # - # @see http://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/TagrisPartialResourcesExistResultsException AWS API Documentation - # - class TagrisPartialResourcesExistResultsException < Struct.new( - :message, - :resource_existence_information) - SENSITIVE = [] - include Aws::Structure - end - - # @!attribute [rw] tagris_account_id - # @return [String] - # - # @!attribute [rw] tagris_amazon_resource_name - # @return [String] - # - # @!attribute [rw] tagris_internal_id - # @return [String] - # - # @!attribute [rw] tagris_version - # @return [Integer] - # - # @see http://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/TagrisSweepListItem AWS API Documentation - # - class TagrisSweepListItem < Struct.new( - :tagris_account_id, - :tagris_amazon_resource_name, - :tagris_internal_id, - :tagris_version) - SENSITIVE = [] - include Aws::Structure - end - - # @!attribute [rw] message - # @return [String] - # - # @see http://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/TagrisThrottledException AWS API Documentation - # - class TagrisThrottledException < Struct.new( - :message) - SENSITIVE = [] - include Aws::Structure - end - - # @!attribute [rw] tagris_sweep_list - # 
@return [Array] - # - # @see http://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/TagrisVerifyResourcesExistInput AWS API Documentation - # - class TagrisVerifyResourcesExistInput < Struct.new( - :tagris_sweep_list) - SENSITIVE = [] - include Aws::Structure - end - - # @!attribute [rw] tagris_sweep_list_result - # @return [Hash] - # - # @see http://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/TagrisVerifyResourcesExistOutput AWS API Documentation - # - class TagrisVerifyResourcesExistOutput < Struct.new( - :tagris_sweep_list_result) - SENSITIVE = [] - include Aws::Structure - end - # @!attribute [rw] delivery_stream_name # The name of the delivery stream. # @return [String] diff --git a/gems/aws-sdk-firehose/sig/client.rbs b/gems/aws-sdk-firehose/sig/client.rbs index 6b467a1302a..d2ba7c56416 100644 --- a/gems/aws-sdk-firehose/sig/client.rbs +++ b/gems/aws-sdk-firehose/sig/client.rbs @@ -717,17 +717,6 @@ module Aws ) -> _DescribeDeliveryStreamResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _DescribeDeliveryStreamResponseSuccess - interface _GetKinesisStreamResponseSuccess - include ::Seahorse::Client::_ResponseSuccess[Types::GetKinesisStreamOutput] - def kinesis_stream_arn: () -> ::String - def credentials_for_reading_kinesis_stream: () -> Types::SessionCredentials - end - # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/Firehose/Client.html#get_kinesis_stream-instance_method - def get_kinesis_stream: ( - delivery_stream_arn: ::String - ) -> _GetKinesisStreamResponseSuccess - | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetKinesisStreamResponseSuccess - interface _ListDeliveryStreamsResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::ListDeliveryStreamsOutput] def delivery_stream_names: () -> ::Array[::String] @@ -1410,23 +1399,6 @@ module Aws } ) -> _UpdateDestinationResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> 
_UpdateDestinationResponseSuccess - - interface _VerifyResourcesExistForTagrisResponseSuccess - include ::Seahorse::Client::_ResponseSuccess[Types::TagrisVerifyResourcesExistOutput] - def tagris_sweep_list_result: () -> ::Hash[::String, ("ACTIVE" | "NOT_ACTIVE")] - end - # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/Firehose/Client.html#verify_resources_exist_for_tagris-instance_method - def verify_resources_exist_for_tagris: ( - tagris_sweep_list: Array[ - { - tagris_account_id: ::String?, - tagris_amazon_resource_name: ::String?, - tagris_internal_id: ::String?, - tagris_version: ::Integer? - }, - ] - ) -> _VerifyResourcesExistForTagrisResponseSuccess - | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _VerifyResourcesExistForTagrisResponseSuccess end end end diff --git a/gems/aws-sdk-firehose/sig/errors.rbs b/gems/aws-sdk-firehose/sig/errors.rbs index 41ef81d2288..6dfd72cfd15 100644 --- a/gems/aws-sdk-firehose/sig/errors.rbs +++ b/gems/aws-sdk-firehose/sig/errors.rbs @@ -25,10 +25,6 @@ module Aws def code: () -> ::String def message: () -> ::String end - class InvalidStreamTypeException < ::Aws::Errors::ServiceError - def message: () -> ::String - def source: () -> ::String - end class LimitExceededException < ::Aws::Errors::ServiceError def message: () -> ::String end @@ -41,26 +37,6 @@ module Aws class ServiceUnavailableException < ::Aws::Errors::ServiceError def message: () -> ::String end - class TagrisAccessDeniedException < ::Aws::Errors::ServiceError - def message: () -> ::String - end - class TagrisInternalServiceException < ::Aws::Errors::ServiceError - def message: () -> ::String - end - class TagrisInvalidArnException < ::Aws::Errors::ServiceError - def message: () -> ::String - def sweep_list_item: () -> ::String - end - class TagrisInvalidParameterException < ::Aws::Errors::ServiceError - def message: () -> ::String - end - class TagrisPartialResourcesExistResultsException < ::Aws::Errors::ServiceError - def message: () -> 
::String - def resource_existence_information: () -> ::String - end - class TagrisThrottledException < ::Aws::Errors::ServiceError - def message: () -> ::String - end end end end diff --git a/gems/aws-sdk-firehose/sig/types.rbs b/gems/aws-sdk-firehose/sig/types.rbs index 1d54af9aa41..a2ce922c02b 100644 --- a/gems/aws-sdk-firehose/sig/types.rbs +++ b/gems/aws-sdk-firehose/sig/types.rbs @@ -399,17 +399,6 @@ module Aws::Firehose SENSITIVE: [] end - class GetKinesisStreamInput - attr_accessor delivery_stream_arn: ::String - SENSITIVE: [] - end - - class GetKinesisStreamOutput - attr_accessor kinesis_stream_arn: ::String - attr_accessor credentials_for_reading_kinesis_stream: Types::SessionCredentials - SENSITIVE: [:credentials_for_reading_kinesis_stream] - end - class HiveJsonSerDe attr_accessor timestamp_formats: ::Array[::String] SENSITIVE: [] @@ -512,12 +501,6 @@ module Aws::Firehose SENSITIVE: [] end - class InvalidStreamTypeException - attr_accessor message: ::String - attr_accessor source: ::String - SENSITIVE: [] - end - class KMSEncryptionConfig attr_accessor awskms_key_arn: ::String SENSITIVE: [] @@ -794,14 +777,6 @@ module Aws::Firehose SENSITIVE: [] end - class SessionCredentials - attr_accessor access_key_id: ::String - attr_accessor secret_access_key: ::String - attr_accessor session_token: ::String - attr_accessor expiration: ::Time - SENSITIVE: [:access_key_id, :secret_access_key, :session_token] - end - class SnowflakeDestinationConfiguration attr_accessor account_url: ::String attr_accessor private_key: ::String @@ -972,56 +947,6 @@ module Aws::Firehose class TagDeliveryStreamOutput < Aws::EmptyStructure end - class TagrisAccessDeniedException - attr_accessor message: ::String - SENSITIVE: [] - end - - class TagrisInternalServiceException - attr_accessor message: ::String - SENSITIVE: [] - end - - class TagrisInvalidArnException - attr_accessor message: ::String - attr_accessor sweep_list_item: Types::TagrisSweepListItem - SENSITIVE: [] - end - - class 
TagrisInvalidParameterException - attr_accessor message: ::String - SENSITIVE: [] - end - - class TagrisPartialResourcesExistResultsException - attr_accessor message: ::String - attr_accessor resource_existence_information: ::Hash[::String, ("ACTIVE" | "NOT_ACTIVE")] - SENSITIVE: [] - end - - class TagrisSweepListItem - attr_accessor tagris_account_id: ::String - attr_accessor tagris_amazon_resource_name: ::String - attr_accessor tagris_internal_id: ::String - attr_accessor tagris_version: ::Integer - SENSITIVE: [] - end - - class TagrisThrottledException - attr_accessor message: ::String - SENSITIVE: [] - end - - class TagrisVerifyResourcesExistInput - attr_accessor tagris_sweep_list: ::Array[Types::TagrisSweepListItem] - SENSITIVE: [] - end - - class TagrisVerifyResourcesExistOutput - attr_accessor tagris_sweep_list_result: ::Hash[::String, ("ACTIVE" | "NOT_ACTIVE")] - SENSITIVE: [] - end - class UntagDeliveryStreamInput attr_accessor delivery_stream_name: ::String attr_accessor tag_keys: ::Array[::String] diff --git a/gems/aws-sdk-lambda/CHANGELOG.md b/gems/aws-sdk-lambda/CHANGELOG.md index ea4b4de2a18..168ce0f257a 100644 --- a/gems/aws-sdk-lambda/CHANGELOG.md +++ b/gems/aws-sdk-lambda/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.116.0 (2024-02-20) +------------------ + +* Feature - Add .NET 8 (dotnet8) Runtime support to AWS Lambda. 
+ 1.115.0 (2024-02-16) ------------------ diff --git a/gems/aws-sdk-lambda/VERSION b/gems/aws-sdk-lambda/VERSION index 97ee99fccc8..10c8812fe5e 100644 --- a/gems/aws-sdk-lambda/VERSION +++ b/gems/aws-sdk-lambda/VERSION @@ -1 +1 @@ -1.115.0 +1.116.0 diff --git a/gems/aws-sdk-lambda/lib/aws-sdk-lambda.rb b/gems/aws-sdk-lambda/lib/aws-sdk-lambda.rb index c3523ca4d91..8a6c57d48c1 100644 --- a/gems/aws-sdk-lambda/lib/aws-sdk-lambda.rb +++ b/gems/aws-sdk-lambda/lib/aws-sdk-lambda.rb @@ -54,6 +54,6 @@ # @!group service module Aws::Lambda - GEM_VERSION = '1.115.0' + GEM_VERSION = '1.116.0' end diff --git a/gems/aws-sdk-lambda/lib/aws-sdk-lambda/client.rb b/gems/aws-sdk-lambda/lib/aws-sdk-lambda/client.rb index d66abd7d3ed..c6a888b5df6 100644 --- a/gems/aws-sdk-lambda/lib/aws-sdk-lambda/client.rb +++ b/gems/aws-sdk-lambda/lib/aws-sdk-lambda/client.rb @@ -1420,7 +1420,7 @@ def create_event_source_mapping(params = {}, options = {}) # # resp = client.create_function({ # function_name: "FunctionName", # required - # runtime: "nodejs", # accepts nodejs, nodejs4.3, nodejs6.10, nodejs8.10, nodejs10.x, nodejs12.x, nodejs14.x, nodejs16.x, java8, java8.al2, java11, python2.7, python3.6, python3.7, python3.8, python3.9, dotnetcore1.0, dotnetcore2.0, dotnetcore2.1, dotnetcore3.1, dotnet6, nodejs4.3-edge, go1.x, ruby2.5, ruby2.7, provided, provided.al2, nodejs18.x, python3.10, java17, ruby3.2, python3.11, nodejs20.x, provided.al2023, python3.12, java21 + # runtime: "nodejs", # accepts nodejs, nodejs4.3, nodejs6.10, nodejs8.10, nodejs10.x, nodejs12.x, nodejs14.x, nodejs16.x, java8, java8.al2, java11, python2.7, python3.6, python3.7, python3.8, python3.9, dotnetcore1.0, dotnetcore2.0, dotnetcore2.1, dotnetcore3.1, dotnet6, dotnet8, nodejs4.3-edge, go1.x, ruby2.5, ruby2.7, provided, provided.al2, nodejs18.x, python3.10, java17, ruby3.2, python3.11, nodejs20.x, provided.al2023, python3.12, java21 # role: "RoleArn", # required # handler: "Handler", # code: { # required @@ -1487,7 +1487,7 @@ 
def create_event_source_mapping(params = {}, options = {}) # # resp.function_name #=> String # resp.function_arn #=> String - # resp.runtime #=> String, one of "nodejs", "nodejs4.3", "nodejs6.10", "nodejs8.10", "nodejs10.x", "nodejs12.x", "nodejs14.x", "nodejs16.x", "java8", "java8.al2", "java11", "python2.7", "python3.6", "python3.7", "python3.8", "python3.9", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", "dotnetcore3.1", "dotnet6", "nodejs4.3-edge", "go1.x", "ruby2.5", "ruby2.7", "provided", "provided.al2", "nodejs18.x", "python3.10", "java17", "ruby3.2", "python3.11", "nodejs20.x", "provided.al2023", "python3.12", "java21" + # resp.runtime #=> String, one of "nodejs", "nodejs4.3", "nodejs6.10", "nodejs8.10", "nodejs10.x", "nodejs12.x", "nodejs14.x", "nodejs16.x", "java8", "java8.al2", "java11", "python2.7", "python3.6", "python3.7", "python3.8", "python3.9", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", "dotnetcore3.1", "dotnet6", "dotnet8", "nodejs4.3-edge", "go1.x", "ruby2.5", "ruby2.7", "provided", "provided.al2", "nodejs18.x", "python3.10", "java17", "ruby3.2", "python3.11", "nodejs20.x", "provided.al2023", "python3.12", "java21" # resp.role #=> String # resp.handler #=> String # resp.code_size #=> Integer @@ -2365,7 +2365,7 @@ def get_event_source_mapping(params = {}, options = {}) # # resp.configuration.function_name #=> String # resp.configuration.function_arn #=> String - # resp.configuration.runtime #=> String, one of "nodejs", "nodejs4.3", "nodejs6.10", "nodejs8.10", "nodejs10.x", "nodejs12.x", "nodejs14.x", "nodejs16.x", "java8", "java8.al2", "java11", "python2.7", "python3.6", "python3.7", "python3.8", "python3.9", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", "dotnetcore3.1", "dotnet6", "nodejs4.3-edge", "go1.x", "ruby2.5", "ruby2.7", "provided", "provided.al2", "nodejs18.x", "python3.10", "java17", "ruby3.2", "python3.11", "nodejs20.x", "provided.al2023", "python3.12", "java21" + # resp.configuration.runtime #=> String, one of 
"nodejs", "nodejs4.3", "nodejs6.10", "nodejs8.10", "nodejs10.x", "nodejs12.x", "nodejs14.x", "nodejs16.x", "java8", "java8.al2", "java11", "python2.7", "python3.6", "python3.7", "python3.8", "python3.9", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", "dotnetcore3.1", "dotnet6", "dotnet8", "nodejs4.3-edge", "go1.x", "ruby2.5", "ruby2.7", "provided", "provided.al2", "nodejs18.x", "python3.10", "java17", "ruby3.2", "python3.11", "nodejs20.x", "provided.al2023", "python3.12", "java21" # resp.configuration.role #=> String # resp.configuration.handler #=> String # resp.configuration.code_size #=> Integer @@ -2612,7 +2612,7 @@ def get_function_concurrency(params = {}, options = {}) # # resp.function_name #=> String # resp.function_arn #=> String - # resp.runtime #=> String, one of "nodejs", "nodejs4.3", "nodejs6.10", "nodejs8.10", "nodejs10.x", "nodejs12.x", "nodejs14.x", "nodejs16.x", "java8", "java8.al2", "java11", "python2.7", "python3.6", "python3.7", "python3.8", "python3.9", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", "dotnetcore3.1", "dotnet6", "nodejs4.3-edge", "go1.x", "ruby2.5", "ruby2.7", "provided", "provided.al2", "nodejs18.x", "python3.10", "java17", "ruby3.2", "python3.11", "nodejs20.x", "provided.al2023", "python3.12", "java21" + # resp.runtime #=> String, one of "nodejs", "nodejs4.3", "nodejs6.10", "nodejs8.10", "nodejs10.x", "nodejs12.x", "nodejs14.x", "nodejs16.x", "java8", "java8.al2", "java11", "python2.7", "python3.6", "python3.7", "python3.8", "python3.9", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", "dotnetcore3.1", "dotnet6", "dotnet8", "nodejs4.3-edge", "go1.x", "ruby2.5", "ruby2.7", "provided", "provided.al2", "nodejs18.x", "python3.10", "java17", "ruby3.2", "python3.11", "nodejs20.x", "provided.al2023", "python3.12", "java21" # resp.role #=> String # resp.handler #=> String # resp.code_size #=> Integer @@ -2859,7 +2859,7 @@ def get_function_url_config(params = {}, options = {}) # resp.created_date #=> Time # resp.version #=> 
Integer # resp.compatible_runtimes #=> Array - # resp.compatible_runtimes[0] #=> String, one of "nodejs", "nodejs4.3", "nodejs6.10", "nodejs8.10", "nodejs10.x", "nodejs12.x", "nodejs14.x", "nodejs16.x", "java8", "java8.al2", "java11", "python2.7", "python3.6", "python3.7", "python3.8", "python3.9", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", "dotnetcore3.1", "dotnet6", "nodejs4.3-edge", "go1.x", "ruby2.5", "ruby2.7", "provided", "provided.al2", "nodejs18.x", "python3.10", "java17", "ruby3.2", "python3.11", "nodejs20.x", "provided.al2023", "python3.12", "java21" + # resp.compatible_runtimes[0] #=> String, one of "nodejs", "nodejs4.3", "nodejs6.10", "nodejs8.10", "nodejs10.x", "nodejs12.x", "nodejs14.x", "nodejs16.x", "java8", "java8.al2", "java11", "python2.7", "python3.6", "python3.7", "python3.8", "python3.9", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", "dotnetcore3.1", "dotnet6", "dotnet8", "nodejs4.3-edge", "go1.x", "ruby2.5", "ruby2.7", "provided", "provided.al2", "nodejs18.x", "python3.10", "java17", "ruby3.2", "python3.11", "nodejs20.x", "provided.al2023", "python3.12", "java21" # resp.license_info #=> String # resp.compatible_architectures #=> Array # resp.compatible_architectures[0] #=> String, one of "x86_64", "arm64" @@ -2914,7 +2914,7 @@ def get_layer_version(params = {}, options = {}) # resp.created_date #=> Time # resp.version #=> Integer # resp.compatible_runtimes #=> Array - # resp.compatible_runtimes[0] #=> String, one of "nodejs", "nodejs4.3", "nodejs6.10", "nodejs8.10", "nodejs10.x", "nodejs12.x", "nodejs14.x", "nodejs16.x", "java8", "java8.al2", "java11", "python2.7", "python3.6", "python3.7", "python3.8", "python3.9", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", "dotnetcore3.1", "dotnet6", "nodejs4.3-edge", "go1.x", "ruby2.5", "ruby2.7", "provided", "provided.al2", "nodejs18.x", "python3.10", "java17", "ruby3.2", "python3.11", "nodejs20.x", "provided.al2023", "python3.12", "java21" + # resp.compatible_runtimes[0] #=> String, 
one of "nodejs", "nodejs4.3", "nodejs6.10", "nodejs8.10", "nodejs10.x", "nodejs12.x", "nodejs14.x", "nodejs16.x", "java8", "java8.al2", "java11", "python2.7", "python3.6", "python3.7", "python3.8", "python3.9", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", "dotnetcore3.1", "dotnet6", "dotnet8", "nodejs4.3-edge", "go1.x", "ruby2.5", "ruby2.7", "provided", "provided.al2", "nodejs18.x", "python3.10", "java17", "ruby3.2", "python3.11", "nodejs20.x", "provided.al2023", "python3.12", "java21" # resp.license_info #=> String # resp.compatible_architectures #=> Array # resp.compatible_architectures[0] #=> String, one of "x86_64", "arm64" @@ -3973,7 +3973,7 @@ def list_function_url_configs(params = {}, options = {}) # resp.functions #=> Array # resp.functions[0].function_name #=> String # resp.functions[0].function_arn #=> String - # resp.functions[0].runtime #=> String, one of "nodejs", "nodejs4.3", "nodejs6.10", "nodejs8.10", "nodejs10.x", "nodejs12.x", "nodejs14.x", "nodejs16.x", "java8", "java8.al2", "java11", "python2.7", "python3.6", "python3.7", "python3.8", "python3.9", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", "dotnetcore3.1", "dotnet6", "nodejs4.3-edge", "go1.x", "ruby2.5", "ruby2.7", "provided", "provided.al2", "nodejs18.x", "python3.10", "java17", "ruby3.2", "python3.11", "nodejs20.x", "provided.al2023", "python3.12", "java21" + # resp.functions[0].runtime #=> String, one of "nodejs", "nodejs4.3", "nodejs6.10", "nodejs8.10", "nodejs10.x", "nodejs12.x", "nodejs14.x", "nodejs16.x", "java8", "java8.al2", "java11", "python2.7", "python3.6", "python3.7", "python3.8", "python3.9", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", "dotnetcore3.1", "dotnet6", "dotnet8", "nodejs4.3-edge", "go1.x", "ruby2.5", "ruby2.7", "provided", "provided.al2", "nodejs18.x", "python3.10", "java17", "ruby3.2", "python3.11", "nodejs20.x", "provided.al2023", "python3.12", "java21" # resp.functions[0].role #=> String # resp.functions[0].handler #=> String # 
resp.functions[0].code_size #=> Integer @@ -4135,7 +4135,7 @@ def list_functions_by_code_signing_config(params = {}, options = {}) # @example Request syntax with placeholder values # # resp = client.list_layer_versions({ - # compatible_runtime: "nodejs", # accepts nodejs, nodejs4.3, nodejs6.10, nodejs8.10, nodejs10.x, nodejs12.x, nodejs14.x, nodejs16.x, java8, java8.al2, java11, python2.7, python3.6, python3.7, python3.8, python3.9, dotnetcore1.0, dotnetcore2.0, dotnetcore2.1, dotnetcore3.1, dotnet6, nodejs4.3-edge, go1.x, ruby2.5, ruby2.7, provided, provided.al2, nodejs18.x, python3.10, java17, ruby3.2, python3.11, nodejs20.x, provided.al2023, python3.12, java21 + # compatible_runtime: "nodejs", # accepts nodejs, nodejs4.3, nodejs6.10, nodejs8.10, nodejs10.x, nodejs12.x, nodejs14.x, nodejs16.x, java8, java8.al2, java11, python2.7, python3.6, python3.7, python3.8, python3.9, dotnetcore1.0, dotnetcore2.0, dotnetcore2.1, dotnetcore3.1, dotnet6, dotnet8, nodejs4.3-edge, go1.x, ruby2.5, ruby2.7, provided, provided.al2, nodejs18.x, python3.10, java17, ruby3.2, python3.11, nodejs20.x, provided.al2023, python3.12, java21 # layer_name: "LayerName", # required # marker: "String", # max_items: 1, @@ -4151,7 +4151,7 @@ def list_functions_by_code_signing_config(params = {}, options = {}) # resp.layer_versions[0].description #=> String # resp.layer_versions[0].created_date #=> Time # resp.layer_versions[0].compatible_runtimes #=> Array - # resp.layer_versions[0].compatible_runtimes[0] #=> String, one of "nodejs", "nodejs4.3", "nodejs6.10", "nodejs8.10", "nodejs10.x", "nodejs12.x", "nodejs14.x", "nodejs16.x", "java8", "java8.al2", "java11", "python2.7", "python3.6", "python3.7", "python3.8", "python3.9", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", "dotnetcore3.1", "dotnet6", "nodejs4.3-edge", "go1.x", "ruby2.5", "ruby2.7", "provided", "provided.al2", "nodejs18.x", "python3.10", "java17", "ruby3.2", "python3.11", "nodejs20.x", "provided.al2023", "python3.12", "java21" + # 
resp.layer_versions[0].compatible_runtimes[0] #=> String, one of "nodejs", "nodejs4.3", "nodejs6.10", "nodejs8.10", "nodejs10.x", "nodejs12.x", "nodejs14.x", "nodejs16.x", "java8", "java8.al2", "java11", "python2.7", "python3.6", "python3.7", "python3.8", "python3.9", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", "dotnetcore3.1", "dotnet6", "dotnet8", "nodejs4.3-edge", "go1.x", "ruby2.5", "ruby2.7", "provided", "provided.al2", "nodejs18.x", "python3.10", "java17", "ruby3.2", "python3.11", "nodejs20.x", "provided.al2023", "python3.12", "java21" # resp.layer_versions[0].license_info #=> String # resp.layer_versions[0].compatible_architectures #=> Array # resp.layer_versions[0].compatible_architectures[0] #=> String, one of "x86_64", "arm64" @@ -4210,7 +4210,7 @@ def list_layer_versions(params = {}, options = {}) # @example Request syntax with placeholder values # # resp = client.list_layers({ - # compatible_runtime: "nodejs", # accepts nodejs, nodejs4.3, nodejs6.10, nodejs8.10, nodejs10.x, nodejs12.x, nodejs14.x, nodejs16.x, java8, java8.al2, java11, python2.7, python3.6, python3.7, python3.8, python3.9, dotnetcore1.0, dotnetcore2.0, dotnetcore2.1, dotnetcore3.1, dotnet6, nodejs4.3-edge, go1.x, ruby2.5, ruby2.7, provided, provided.al2, nodejs18.x, python3.10, java17, ruby3.2, python3.11, nodejs20.x, provided.al2023, python3.12, java21 + # compatible_runtime: "nodejs", # accepts nodejs, nodejs4.3, nodejs6.10, nodejs8.10, nodejs10.x, nodejs12.x, nodejs14.x, nodejs16.x, java8, java8.al2, java11, python2.7, python3.6, python3.7, python3.8, python3.9, dotnetcore1.0, dotnetcore2.0, dotnetcore2.1, dotnetcore3.1, dotnet6, dotnet8, nodejs4.3-edge, go1.x, ruby2.5, ruby2.7, provided, provided.al2, nodejs18.x, python3.10, java17, ruby3.2, python3.11, nodejs20.x, provided.al2023, python3.12, java21 # marker: "String", # max_items: 1, # compatible_architecture: "x86_64", # accepts x86_64, arm64 @@ -4227,7 +4227,7 @@ def list_layer_versions(params = {}, options = {}) # 
resp.layers[0].latest_matching_version.description #=> String # resp.layers[0].latest_matching_version.created_date #=> Time # resp.layers[0].latest_matching_version.compatible_runtimes #=> Array - # resp.layers[0].latest_matching_version.compatible_runtimes[0] #=> String, one of "nodejs", "nodejs4.3", "nodejs6.10", "nodejs8.10", "nodejs10.x", "nodejs12.x", "nodejs14.x", "nodejs16.x", "java8", "java8.al2", "java11", "python2.7", "python3.6", "python3.7", "python3.8", "python3.9", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", "dotnetcore3.1", "dotnet6", "nodejs4.3-edge", "go1.x", "ruby2.5", "ruby2.7", "provided", "provided.al2", "nodejs18.x", "python3.10", "java17", "ruby3.2", "python3.11", "nodejs20.x", "provided.al2023", "python3.12", "java21" + # resp.layers[0].latest_matching_version.compatible_runtimes[0] #=> String, one of "nodejs", "nodejs4.3", "nodejs6.10", "nodejs8.10", "nodejs10.x", "nodejs12.x", "nodejs14.x", "nodejs16.x", "java8", "java8.al2", "java11", "python2.7", "python3.6", "python3.7", "python3.8", "python3.9", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", "dotnetcore3.1", "dotnet6", "dotnet8", "nodejs4.3-edge", "go1.x", "ruby2.5", "ruby2.7", "provided", "provided.al2", "nodejs18.x", "python3.10", "java17", "ruby3.2", "python3.11", "nodejs20.x", "provided.al2023", "python3.12", "java21" # resp.layers[0].latest_matching_version.license_info #=> String # resp.layers[0].latest_matching_version.compatible_architectures #=> Array # resp.layers[0].latest_matching_version.compatible_architectures[0] #=> String, one of "x86_64", "arm64" @@ -4389,7 +4389,7 @@ def list_tags(params = {}, options = {}) # resp.versions #=> Array # resp.versions[0].function_name #=> String # resp.versions[0].function_arn #=> String - # resp.versions[0].runtime #=> String, one of "nodejs", "nodejs4.3", "nodejs6.10", "nodejs8.10", "nodejs10.x", "nodejs12.x", "nodejs14.x", "nodejs16.x", "java8", "java8.al2", "java11", "python2.7", "python3.6", "python3.7", "python3.8", 
"python3.9", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", "dotnetcore3.1", "dotnet6", "nodejs4.3-edge", "go1.x", "ruby2.5", "ruby2.7", "provided", "provided.al2", "nodejs18.x", "python3.10", "java17", "ruby3.2", "python3.11", "nodejs20.x", "provided.al2023", "python3.12", "java21" + # resp.versions[0].runtime #=> String, one of "nodejs", "nodejs4.3", "nodejs6.10", "nodejs8.10", "nodejs10.x", "nodejs12.x", "nodejs14.x", "nodejs16.x", "java8", "java8.al2", "java11", "python2.7", "python3.6", "python3.7", "python3.8", "python3.9", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", "dotnetcore3.1", "dotnet6", "dotnet8", "nodejs4.3-edge", "go1.x", "ruby2.5", "ruby2.7", "provided", "provided.al2", "nodejs18.x", "python3.10", "java17", "ruby3.2", "python3.11", "nodejs20.x", "provided.al2023", "python3.12", "java21" # resp.versions[0].role #=> String # resp.versions[0].handler #=> String # resp.versions[0].code_size #=> Integer @@ -4536,7 +4536,7 @@ def list_versions_by_function(params = {}, options = {}) # s3_object_version: "S3ObjectVersion", # zip_file: "data", # }, - # compatible_runtimes: ["nodejs"], # accepts nodejs, nodejs4.3, nodejs6.10, nodejs8.10, nodejs10.x, nodejs12.x, nodejs14.x, nodejs16.x, java8, java8.al2, java11, python2.7, python3.6, python3.7, python3.8, python3.9, dotnetcore1.0, dotnetcore2.0, dotnetcore2.1, dotnetcore3.1, dotnet6, nodejs4.3-edge, go1.x, ruby2.5, ruby2.7, provided, provided.al2, nodejs18.x, python3.10, java17, ruby3.2, python3.11, nodejs20.x, provided.al2023, python3.12, java21 + # compatible_runtimes: ["nodejs"], # accepts nodejs, nodejs4.3, nodejs6.10, nodejs8.10, nodejs10.x, nodejs12.x, nodejs14.x, nodejs16.x, java8, java8.al2, java11, python2.7, python3.6, python3.7, python3.8, python3.9, dotnetcore1.0, dotnetcore2.0, dotnetcore2.1, dotnetcore3.1, dotnet6, dotnet8, nodejs4.3-edge, go1.x, ruby2.5, ruby2.7, provided, provided.al2, nodejs18.x, python3.10, java17, ruby3.2, python3.11, nodejs20.x, provided.al2023, python3.12, 
java21 # license_info: "LicenseInfo", # compatible_architectures: ["x86_64"], # accepts x86_64, arm64 # }) @@ -4554,7 +4554,7 @@ def list_versions_by_function(params = {}, options = {}) # resp.created_date #=> Time # resp.version #=> Integer # resp.compatible_runtimes #=> Array - # resp.compatible_runtimes[0] #=> String, one of "nodejs", "nodejs4.3", "nodejs6.10", "nodejs8.10", "nodejs10.x", "nodejs12.x", "nodejs14.x", "nodejs16.x", "java8", "java8.al2", "java11", "python2.7", "python3.6", "python3.7", "python3.8", "python3.9", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", "dotnetcore3.1", "dotnet6", "nodejs4.3-edge", "go1.x", "ruby2.5", "ruby2.7", "provided", "provided.al2", "nodejs18.x", "python3.10", "java17", "ruby3.2", "python3.11", "nodejs20.x", "provided.al2023", "python3.12", "java21" + # resp.compatible_runtimes[0] #=> String, one of "nodejs", "nodejs4.3", "nodejs6.10", "nodejs8.10", "nodejs10.x", "nodejs12.x", "nodejs14.x", "nodejs16.x", "java8", "java8.al2", "java11", "python2.7", "python3.6", "python3.7", "python3.8", "python3.9", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", "dotnetcore3.1", "dotnet6", "dotnet8", "nodejs4.3-edge", "go1.x", "ruby2.5", "ruby2.7", "provided", "provided.al2", "nodejs18.x", "python3.10", "java17", "ruby3.2", "python3.11", "nodejs20.x", "provided.al2023", "python3.12", "java21" # resp.license_info #=> String # resp.compatible_architectures #=> Array # resp.compatible_architectures[0] #=> String, one of "x86_64", "arm64" @@ -4667,7 +4667,7 @@ def publish_layer_version(params = {}, options = {}) # # resp.function_name #=> String # resp.function_arn #=> String - # resp.runtime #=> String, one of "nodejs", "nodejs4.3", "nodejs6.10", "nodejs8.10", "nodejs10.x", "nodejs12.x", "nodejs14.x", "nodejs16.x", "java8", "java8.al2", "java11", "python2.7", "python3.6", "python3.7", "python3.8", "python3.9", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", "dotnetcore3.1", "dotnet6", "nodejs4.3-edge", "go1.x", "ruby2.5", 
"ruby2.7", "provided", "provided.al2", "nodejs18.x", "python3.10", "java17", "ruby3.2", "python3.11", "nodejs20.x", "provided.al2023", "python3.12", "java21" + # resp.runtime #=> String, one of "nodejs", "nodejs4.3", "nodejs6.10", "nodejs8.10", "nodejs10.x", "nodejs12.x", "nodejs14.x", "nodejs16.x", "java8", "java8.al2", "java11", "python2.7", "python3.6", "python3.7", "python3.8", "python3.9", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", "dotnetcore3.1", "dotnet6", "dotnet8", "nodejs4.3-edge", "go1.x", "ruby2.5", "ruby2.7", "provided", "provided.al2", "nodejs18.x", "python3.10", "java17", "ruby3.2", "python3.11", "nodejs20.x", "provided.al2023", "python3.12", "java21" # resp.role #=> String # resp.handler #=> String # resp.code_size #=> Integer @@ -5874,7 +5874,7 @@ def update_event_source_mapping(params = {}, options = {}) # # resp.function_name #=> String # resp.function_arn #=> String - # resp.runtime #=> String, one of "nodejs", "nodejs4.3", "nodejs6.10", "nodejs8.10", "nodejs10.x", "nodejs12.x", "nodejs14.x", "nodejs16.x", "java8", "java8.al2", "java11", "python2.7", "python3.6", "python3.7", "python3.8", "python3.9", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", "dotnetcore3.1", "dotnet6", "nodejs4.3-edge", "go1.x", "ruby2.5", "ruby2.7", "provided", "provided.al2", "nodejs18.x", "python3.10", "java17", "ruby3.2", "python3.11", "nodejs20.x", "provided.al2023", "python3.12", "java21" + # resp.runtime #=> String, one of "nodejs", "nodejs4.3", "nodejs6.10", "nodejs8.10", "nodejs10.x", "nodejs12.x", "nodejs14.x", "nodejs16.x", "java8", "java8.al2", "java11", "python2.7", "python3.6", "python3.7", "python3.8", "python3.9", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", "dotnetcore3.1", "dotnet6", "dotnet8", "nodejs4.3-edge", "go1.x", "ruby2.5", "ruby2.7", "provided", "provided.al2", "nodejs18.x", "python3.10", "java17", "ruby3.2", "python3.11", "nodejs20.x", "provided.al2023", "python3.12", "java21" # resp.role #=> String # resp.handler #=> String # 
resp.code_size #=> Integer @@ -6181,7 +6181,7 @@ def update_function_code(params = {}, options = {}) # "EnvironmentVariableName" => "EnvironmentVariableValue", # }, # }, - # runtime: "nodejs", # accepts nodejs, nodejs4.3, nodejs6.10, nodejs8.10, nodejs10.x, nodejs12.x, nodejs14.x, nodejs16.x, java8, java8.al2, java11, python2.7, python3.6, python3.7, python3.8, python3.9, dotnetcore1.0, dotnetcore2.0, dotnetcore2.1, dotnetcore3.1, dotnet6, nodejs4.3-edge, go1.x, ruby2.5, ruby2.7, provided, provided.al2, nodejs18.x, python3.10, java17, ruby3.2, python3.11, nodejs20.x, provided.al2023, python3.12, java21 + # runtime: "nodejs", # accepts nodejs, nodejs4.3, nodejs6.10, nodejs8.10, nodejs10.x, nodejs12.x, nodejs14.x, nodejs16.x, java8, java8.al2, java11, python2.7, python3.6, python3.7, python3.8, python3.9, dotnetcore1.0, dotnetcore2.0, dotnetcore2.1, dotnetcore3.1, dotnet6, dotnet8, nodejs4.3-edge, go1.x, ruby2.5, ruby2.7, provided, provided.al2, nodejs18.x, python3.10, java17, ruby3.2, python3.11, nodejs20.x, provided.al2023, python3.12, java21 # dead_letter_config: { # target_arn: "ResourceArn", # }, @@ -6220,7 +6220,7 @@ def update_function_code(params = {}, options = {}) # # resp.function_name #=> String # resp.function_arn #=> String - # resp.runtime #=> String, one of "nodejs", "nodejs4.3", "nodejs6.10", "nodejs8.10", "nodejs10.x", "nodejs12.x", "nodejs14.x", "nodejs16.x", "java8", "java8.al2", "java11", "python2.7", "python3.6", "python3.7", "python3.8", "python3.9", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", "dotnetcore3.1", "dotnet6", "nodejs4.3-edge", "go1.x", "ruby2.5", "ruby2.7", "provided", "provided.al2", "nodejs18.x", "python3.10", "java17", "ruby3.2", "python3.11", "nodejs20.x", "provided.al2023", "python3.12", "java21" + # resp.runtime #=> String, one of "nodejs", "nodejs4.3", "nodejs6.10", "nodejs8.10", "nodejs10.x", "nodejs12.x", "nodejs14.x", "nodejs16.x", "java8", "java8.al2", "java11", "python2.7", "python3.6", "python3.7", "python3.8", 
"python3.9", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", "dotnetcore3.1", "dotnet6", "dotnet8", "nodejs4.3-edge", "go1.x", "ruby2.5", "ruby2.7", "provided", "provided.al2", "nodejs18.x", "python3.10", "java17", "ruby3.2", "python3.11", "nodejs20.x", "provided.al2023", "python3.12", "java21" # resp.role #=> String # resp.handler #=> String # resp.code_size #=> Integer @@ -6507,7 +6507,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-lambda' - context[:gem_version] = '1.115.0' + context[:gem_version] = '1.116.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-lambda/lib/aws-sdk-lambda/types.rb b/gems/aws-sdk-lambda/lib/aws-sdk-lambda/types.rb index d7791444aca..96d09a5fe6b 100644 --- a/gems/aws-sdk-lambda/lib/aws-sdk-lambda/types.rb +++ b/gems/aws-sdk-lambda/lib/aws-sdk-lambda/types.rb @@ -1892,8 +1892,9 @@ class EphemeralStorage < Struct.new( # @return [String] # # @!attribute [rw] destination_config - # (Kinesis and DynamoDB Streams only) An Amazon SQS queue or Amazon - # SNS topic destination for discarded records. + # (Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Apache + # Kafka event sources only) A configuration object that specifies the + # destination of an event after Lambda processes it. # @return [Types::DestinationConfig] # # @!attribute [rw] topics @@ -4676,8 +4677,8 @@ class LoggingConfig < Struct.new( # SQS queue as the destination. # # To retain records of failed invocations from [self-managed Kafka][3] - # or [Amazon MSK][4], you can configure an Amazon SNS topic or Amazon - # SQS queue as the destination. + # or [Amazon MSK][4], you can configure an Amazon SNS topic, Amazon + # SQS queue, or Amazon S3 bucket as the destination. 
# # # diff --git a/gems/aws-sdk-lambda/sig/client.rbs b/gems/aws-sdk-lambda/sig/client.rbs index 44f2c66e9f4..a8dd4f7223f 100644 --- a/gems/aws-sdk-lambda/sig/client.rbs +++ b/gems/aws-sdk-lambda/sig/client.rbs @@ -241,7 +241,7 @@ module Aws include ::Seahorse::Client::_ResponseSuccess[Types::FunctionConfiguration] def function_name: () -> ::String def function_arn: () -> ::String - def runtime: () -> ("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21") + def runtime: () -> ("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "dotnet8" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21") def role: () -> ::String def handler: () -> ::String def code_size: () -> ::Integer @@ -279,7 +279,7 @@ module Aws # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/Lambda/Client.html#create_function-instance_method def create_function: ( function_name: ::String, - ?runtime: ("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | 
"dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21"), + ?runtime: ("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "dotnet8" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21"), role: ::String, ?handler: ::String, code: { @@ -575,7 +575,7 @@ module Aws include ::Seahorse::Client::_ResponseSuccess[Types::FunctionConfiguration] def function_name: () -> ::String def function_arn: () -> ::String - def runtime: () -> ("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21") + def runtime: () -> ("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "dotnet8" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | 
"nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21") def role: () -> ::String def handler: () -> ::String def code_size: () -> ::Integer @@ -657,7 +657,7 @@ module Aws def description: () -> ::String def created_date: () -> ::Time def version: () -> ::Integer - def compatible_runtimes: () -> ::Array[("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21")] + def compatible_runtimes: () -> ::Array[("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "dotnet8" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21")] def license_info: () -> ::String def compatible_architectures: () -> ::Array[("x86_64" | "arm64")] end @@ -676,7 +676,7 @@ module Aws def description: () -> ::String def created_date: () -> ::Time def version: () -> ::Integer - def compatible_runtimes: () -> ::Array[("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | 
"dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21")] + def compatible_runtimes: () -> ::Array[("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "dotnet8" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21")] def license_info: () -> ::String def compatible_architectures: () -> ::Array[("x86_64" | "arm64")] end @@ -887,7 +887,7 @@ module Aws end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/Lambda/Client.html#list_layer_versions-instance_method def list_layer_versions: ( - ?compatible_runtime: ("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21"), + ?compatible_runtime: ("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "dotnet8" | 
"nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21"), layer_name: ::String, ?marker: ::String, ?max_items: ::Integer, @@ -902,7 +902,7 @@ module Aws end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/Lambda/Client.html#list_layers-instance_method def list_layers: ( - ?compatible_runtime: ("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21"), + ?compatible_runtime: ("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "dotnet8" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21"), ?marker: ::String, ?max_items: ::Integer, ?compatible_architecture: ("x86_64" | "arm64") @@ -953,7 +953,7 @@ module Aws def description: () -> ::String def created_date: () -> ::Time def version: () -> ::Integer - def compatible_runtimes: () -> ::Array[("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | 
"dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21")] + def compatible_runtimes: () -> ::Array[("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "dotnet8" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21")] def license_info: () -> ::String def compatible_architectures: () -> ::Array[("x86_64" | "arm64")] end @@ -967,7 +967,7 @@ module Aws s3_object_version: ::String?, zip_file: ::String? 
}, - ?compatible_runtimes: Array[("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21")], + ?compatible_runtimes: Array[("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "dotnet8" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21")], ?license_info: ::String, ?compatible_architectures: Array[("x86_64" | "arm64")] ) -> _PublishLayerVersionResponseSuccess @@ -977,7 +977,7 @@ module Aws include ::Seahorse::Client::_ResponseSuccess[Types::FunctionConfiguration] def function_name: () -> ::String def function_arn: () -> ::String - def runtime: () -> ("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21") + def runtime: () -> ("nodejs" | "nodejs4.3" | 
"nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "dotnet8" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21") def role: () -> ::String def handler: () -> ::String def code_size: () -> ::Integer @@ -1251,7 +1251,7 @@ module Aws include ::Seahorse::Client::_ResponseSuccess[Types::FunctionConfiguration] def function_name: () -> ::String def function_arn: () -> ::String - def runtime: () -> ("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21") + def runtime: () -> ("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "dotnet8" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21") def role: () -> ::String def handler: () -> ::String def code_size: () -> ::Integer @@ -1305,7 +1305,7 @@ module Aws include 
::Seahorse::Client::_ResponseSuccess[Types::FunctionConfiguration] def function_name: () -> ::String def function_arn: () -> ::String - def runtime: () -> ("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21") + def runtime: () -> ("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "dotnet8" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21") def role: () -> ::String def handler: () -> ::String def code_size: () -> ::Integer @@ -1356,7 +1356,7 @@ module Aws ?environment: { variables: Hash[::String, ::String]? 
}, - ?runtime: ("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21"), + ?runtime: ("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "dotnet8" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21"), ?dead_letter_config: { target_arn: ::String? 
}, diff --git a/gems/aws-sdk-lambda/sig/types.rbs b/gems/aws-sdk-lambda/sig/types.rbs index 8343620bd7a..49dcf09f656 100644 --- a/gems/aws-sdk-lambda/sig/types.rbs +++ b/gems/aws-sdk-lambda/sig/types.rbs @@ -183,7 +183,7 @@ module Aws::Lambda class CreateFunctionRequest attr_accessor function_name: ::String - attr_accessor runtime: ("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21") + attr_accessor runtime: ("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "dotnet8" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21") attr_accessor role: ::String attr_accessor handler: ::String attr_accessor code: Types::FunctionCode @@ -443,7 +443,7 @@ module Aws::Lambda class FunctionConfiguration attr_accessor function_name: ::String attr_accessor function_arn: ::String - attr_accessor runtime: ("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "nodejs4.3-edge" | 
"go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21") + attr_accessor runtime: ("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "dotnet8" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21") attr_accessor role: ::String attr_accessor handler: ::String attr_accessor code_size: ::Integer @@ -624,7 +624,7 @@ module Aws::Lambda attr_accessor description: ::String attr_accessor created_date: ::Time attr_accessor version: ::Integer - attr_accessor compatible_runtimes: ::Array[("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21")] + attr_accessor compatible_runtimes: ::Array[("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "dotnet8" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | 
"nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21")] attr_accessor license_info: ::String attr_accessor compatible_architectures: ::Array[("x86_64" | "arm64")] SENSITIVE: [] @@ -848,7 +848,7 @@ module Aws::Lambda attr_accessor version: ::Integer attr_accessor description: ::String attr_accessor created_date: ::Time - attr_accessor compatible_runtimes: ::Array[("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21")] + attr_accessor compatible_runtimes: ::Array[("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "dotnet8" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21")] attr_accessor license_info: ::String attr_accessor compatible_architectures: ::Array[("x86_64" | "arm64")] SENSITIVE: [] @@ -955,7 +955,7 @@ module Aws::Lambda end class ListLayerVersionsRequest - attr_accessor compatible_runtime: ("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | 
"dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21") + attr_accessor compatible_runtime: ("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "dotnet8" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21") attr_accessor layer_name: ::String attr_accessor marker: ::String attr_accessor max_items: ::Integer @@ -970,7 +970,7 @@ module Aws::Lambda end class ListLayersRequest - attr_accessor compatible_runtime: ("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21") + attr_accessor compatible_runtime: ("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "dotnet8" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | 
"nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21") attr_accessor marker: ::String attr_accessor max_items: ::Integer attr_accessor compatible_architecture: ("x86_64" | "arm64") @@ -1070,7 +1070,7 @@ module Aws::Lambda attr_accessor layer_name: ::String attr_accessor description: ::String attr_accessor content: Types::LayerVersionContentInput - attr_accessor compatible_runtimes: ::Array[("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21")] + attr_accessor compatible_runtimes: ::Array[("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "dotnet8" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21")] attr_accessor license_info: ::String attr_accessor compatible_architectures: ::Array[("x86_64" | "arm64")] SENSITIVE: [] @@ -1083,7 +1083,7 @@ module Aws::Lambda attr_accessor description: ::String attr_accessor created_date: ::Time attr_accessor version: ::Integer - attr_accessor compatible_runtimes: ::Array[("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | 
"java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21")] + attr_accessor compatible_runtimes: ::Array[("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "dotnet8" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21")] attr_accessor license_info: ::String attr_accessor compatible_architectures: ::Array[("x86_64" | "arm64")] SENSITIVE: [] @@ -1384,7 +1384,7 @@ module Aws::Lambda attr_accessor memory_size: ::Integer attr_accessor vpc_config: Types::VpcConfig attr_accessor environment: Types::Environment - attr_accessor runtime: ("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | "python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21") + attr_accessor runtime: ("nodejs" | "nodejs4.3" | "nodejs6.10" | "nodejs8.10" | "nodejs10.x" | "nodejs12.x" | "nodejs14.x" | "nodejs16.x" | "java8" | "java8.al2" | "java11" | "python2.7" | "python3.6" | 
"python3.7" | "python3.8" | "python3.9" | "dotnetcore1.0" | "dotnetcore2.0" | "dotnetcore2.1" | "dotnetcore3.1" | "dotnet6" | "dotnet8" | "nodejs4.3-edge" | "go1.x" | "ruby2.5" | "ruby2.7" | "provided" | "provided.al2" | "nodejs18.x" | "python3.10" | "java17" | "ruby3.2" | "python3.11" | "nodejs20.x" | "provided.al2023" | "python3.12" | "java21") attr_accessor dead_letter_config: Types::DeadLetterConfig attr_accessor kms_key_arn: ::String attr_accessor tracing_config: Types::TracingConfig From 752c3fb7e82a079f6edcbce7455f2c00ad583e01 Mon Sep 17 00:00:00 2001 From: AWS SDK For Ruby Date: Wed, 21 Feb 2024 19:12:08 +0000 Subject: [PATCH 3/8] Updated API models and rebuilt service gems. --- apis/iotevents/2018-07-27/api-2.json | 6 +- .../2018-07-27/endpoint-rule-set-1.json | 40 +- apis/lookoutequipment/2020-12-15/api-2.json | 23 +- apis/lookoutequipment/2020-12-15/docs-2.json | 10 + apis/medialive/2017-10-14/api-2.json | 163 +++- apis/medialive/2017-10-14/docs-2.json | 104 +-- apis/ssm/2014-11-06/api-2.json | 43 +- apis/ssm/2014-11-06/docs-2.json | 44 +- gems/aws-sdk-iotevents/CHANGELOG.md | 5 + gems/aws-sdk-iotevents/VERSION | 2 +- .../lib/aws-sdk-iotevents.rb | 2 +- .../lib/aws-sdk-iotevents/client.rb | 2 +- .../aws-sdk-iotevents/endpoint_provider.rb | 2 +- gems/aws-sdk-lookoutequipment/CHANGELOG.md | 5 + gems/aws-sdk-lookoutequipment/VERSION | 2 +- .../lib/aws-sdk-lookoutequipment.rb | 2 +- .../lib/aws-sdk-lookoutequipment/client.rb | 10 +- .../aws-sdk-lookoutequipment/client_api.rb | 6 + .../lib/aws-sdk-lookoutequipment/types.rb | 126 +++- gems/aws-sdk-lookoutequipment/sig/client.rbs | 3 + gems/aws-sdk-lookoutequipment/sig/types.rbs | 5 + gems/aws-sdk-medialive/CHANGELOG.md | 5 + gems/aws-sdk-medialive/VERSION | 2 +- .../lib/aws-sdk-medialive.rb | 2 +- .../lib/aws-sdk-medialive/client.rb | 714 +++++++++++++++++- .../lib/aws-sdk-medialive/client_api.rb | 47 ++ .../lib/aws-sdk-medialive/endpoints.rb | 14 + .../aws-sdk-medialive/plugins/endpoints.rb | 2 + 
.../lib/aws-sdk-medialive/types.rb | 124 ++- gems/aws-sdk-medialive/sig/client.rbs | 29 + gems/aws-sdk-medialive/sig/types.rbs | 29 + gems/aws-sdk-ssm/CHANGELOG.md | 5 + gems/aws-sdk-ssm/VERSION | 2 +- gems/aws-sdk-ssm/lib/aws-sdk-ssm.rb | 2 +- gems/aws-sdk-ssm/lib/aws-sdk-ssm/client.rb | 159 +++- .../aws-sdk-ssm/lib/aws-sdk-ssm/client_api.rb | 21 + gems/aws-sdk-ssm/lib/aws-sdk-ssm/errors.rb | 48 ++ gems/aws-sdk-ssm/lib/aws-sdk-ssm/types.rb | 139 +++- gems/aws-sdk-ssm/sig/client.rbs | 3 +- gems/aws-sdk-ssm/sig/errors.rbs | 9 + gems/aws-sdk-ssm/sig/types.rbs | 17 + 41 files changed, 1834 insertions(+), 144 deletions(-) diff --git a/apis/iotevents/2018-07-27/api-2.json b/apis/iotevents/2018-07-27/api-2.json index 0142c07e1dc..3807f0023b7 100644 --- a/apis/iotevents/2018-07-27/api-2.json +++ b/apis/iotevents/2018-07-27/api-2.json @@ -508,7 +508,7 @@ "AlarmModelArn":{"type":"string"}, "AlarmModelDescription":{ "type":"string", - "max":128 + "max":1024 }, "AlarmModelName":{ "type":"string", @@ -974,7 +974,7 @@ }, "DetectorModelDescription":{ "type":"string", - "max":128 + "max":1024 }, "DetectorModelName":{ "type":"string", @@ -1205,7 +1205,7 @@ }, "InputDescription":{ "type":"string", - "max":128 + "max":1024 }, "InputIdentifier":{ "type":"structure", diff --git a/apis/iotevents/2018-07-27/endpoint-rule-set-1.json b/apis/iotevents/2018-07-27/endpoint-rule-set-1.json index 678f9935ed1..62cd33544a1 100644 --- a/apis/iotevents/2018-07-27/endpoint-rule-set-1.json +++ b/apis/iotevents/2018-07-27/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ 
{ "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/apis/lookoutequipment/2020-12-15/api-2.json b/apis/lookoutequipment/2020-12-15/api-2.json index 2f21783519e..8ac7a754915 100644 --- a/apis/lookoutequipment/2020-12-15/api-2.json +++ b/apis/lookoutequipment/2020-12-15/api-2.json @@ -952,7 +952,8 @@ "members":{ "InferenceSchedulerArn":{"shape":"InferenceSchedulerArn"}, "InferenceSchedulerName":{"shape":"InferenceSchedulerName"}, - "Status":{"shape":"InferenceSchedulerStatus"} + "Status":{"shape":"InferenceSchedulerStatus"}, + "ModelQuality":{"shape":"ModelQuality"} } }, "CreateLabelGroupRequest":{ @@ -1411,7 +1412,8 @@ "AccumulatedInferenceDataStartTime":{"shape":"Timestamp"}, "AccumulatedInferenceDataEndTime":{"shape":"Timestamp"}, 
"RetrainingSchedulerStatus":{"shape":"RetrainingSchedulerStatus"}, - "ModelDiagnosticsOutputConfiguration":{"shape":"ModelDiagnosticsOutputConfiguration"} + "ModelDiagnosticsOutputConfiguration":{"shape":"ModelDiagnosticsOutputConfiguration"}, + "ModelQuality":{"shape":"ModelQuality"} } }, "DescribeModelVersionRequest":{ @@ -1461,7 +1463,8 @@ "AutoPromotionResult":{"shape":"AutoPromotionResult"}, "AutoPromotionResultReason":{"shape":"AutoPromotionResultReason"}, "ModelDiagnosticsOutputConfiguration":{"shape":"ModelDiagnosticsOutputConfiguration"}, - "ModelDiagnosticsResultsObject":{"shape":"S3Object"} + "ModelDiagnosticsResultsObject":{"shape":"S3Object"}, + "ModelQuality":{"shape":"ModelQuality"} } }, "DescribeResourcePolicyRequest":{ @@ -2200,6 +2203,14 @@ "MANUAL" ] }, + "ModelQuality":{ + "type":"string", + "enum":[ + "QUALITY_THRESHOLD_MET", + "CANNOT_DETERMINE_QUALITY", + "POOR_QUALITY_DETECTED" + ] + }, "ModelStatus":{ "type":"string", "enum":[ @@ -2229,7 +2240,8 @@ "LatestScheduledRetrainingStartTime":{"shape":"Timestamp"}, "NextScheduledRetrainingStartDate":{"shape":"Timestamp"}, "RetrainingSchedulerStatus":{"shape":"RetrainingSchedulerStatus"}, - "ModelDiagnosticsOutputConfiguration":{"shape":"ModelDiagnosticsOutputConfiguration"} + "ModelDiagnosticsOutputConfiguration":{"shape":"ModelDiagnosticsOutputConfiguration"}, + "ModelQuality":{"shape":"ModelQuality"} } }, "ModelVersion":{ @@ -2273,7 +2285,8 @@ "ModelVersionArn":{"shape":"ModelVersionArn"}, "CreatedAt":{"shape":"Timestamp"}, "Status":{"shape":"ModelVersionStatus"}, - "SourceType":{"shape":"ModelVersionSourceType"} + "SourceType":{"shape":"ModelVersionSourceType"}, + "ModelQuality":{"shape":"ModelQuality"} } }, "MonotonicValues":{ diff --git a/apis/lookoutequipment/2020-12-15/docs-2.json b/apis/lookoutequipment/2020-12-15/docs-2.json index dd35dccf9dc..6e19a1be3db 100644 --- a/apis/lookoutequipment/2020-12-15/docs-2.json +++ b/apis/lookoutequipment/2020-12-15/docs-2.json @@ -1151,6 +1151,16 @@ 
"UpdateRetrainingSchedulerRequest$PromoteMode": "

Indicates how the service will use new models. In MANAGED mode, new models will automatically be used for inference if they have better performance than the current model. In MANUAL mode, the new models will not be used until they are manually activated.

" } }, + "ModelQuality": { + "base": null, + "refs": { + "CreateInferenceSchedulerResponse$ModelQuality": "

Provides a quality assessment for a model that uses labels. If Lookout for Equipment determines that the model quality is poor based on training metrics, the value is POOR_QUALITY_DETECTED. Otherwise, the value is QUALITY_THRESHOLD_MET.

If the model is unlabeled, the model quality can't be assessed and the value of ModelQuality is CANNOT_DETERMINE_QUALITY. In this situation, you can get a model quality assessment by adding labels to the input dataset and retraining the model.

For information about using labels with your models, see Understanding labeling.

For information about improving the quality of a model, see Best practices with Amazon Lookout for Equipment.

", + "DescribeModelResponse$ModelQuality": "

Provides a quality assessment for a model that uses labels. If Lookout for Equipment determines that the model quality is poor based on training metrics, the value is POOR_QUALITY_DETECTED. Otherwise, the value is QUALITY_THRESHOLD_MET.

If the model is unlabeled, the model quality can't be assessed and the value of ModelQuality is CANNOT_DETERMINE_QUALITY. In this situation, you can get a model quality assessment by adding labels to the input dataset and retraining the model.

For information about using labels with your models, see Understanding labeling.

For information about improving the quality of a model, see Best practices with Amazon Lookout for Equipment.

", + "DescribeModelVersionResponse$ModelQuality": "

Provides a quality assessment for a model that uses labels. If Lookout for Equipment determines that the model quality is poor based on training metrics, the value is POOR_QUALITY_DETECTED. Otherwise, the value is QUALITY_THRESHOLD_MET.

If the model is unlabeled, the model quality can't be assessed and the value of ModelQuality is CANNOT_DETERMINE_QUALITY. In this situation, you can get a model quality assessment by adding labels to the input dataset and retraining the model.

For information about using labels with your models, see Understanding labeling.

For information about improving the quality of a model, see Best practices with Amazon Lookout for Equipment.

", + "ModelSummary$ModelQuality": "

Provides a quality assessment for a model that uses labels. If Lookout for Equipment determines that the model quality is poor based on training metrics, the value is POOR_QUALITY_DETECTED. Otherwise, the value is QUALITY_THRESHOLD_MET.

If the model is unlabeled, the model quality can't be assessed and the value of ModelQuality is CANNOT_DETERMINE_QUALITY. In this situation, you can get a model quality assessment by adding labels to the input dataset and retraining the model.

For information about using labels with your models, see Understanding labeling.

For information about improving the quality of a model, see Best practices with Amazon Lookout for Equipment.

", + "ModelVersionSummary$ModelQuality": "

Provides a quality assessment for a model that uses labels. If Lookout for Equipment determines that the model quality is poor based on training metrics, the value is POOR_QUALITY_DETECTED. Otherwise, the value is QUALITY_THRESHOLD_MET.

If the model is unlabeled, the model quality can't be assessed and the value of ModelQuality is CANNOT_DETERMINE_QUALITY. In this situation, you can get a model quality assessment by adding labels to the input dataset and retraining the model.

For information about improving the quality of a model, see Best practices with Amazon Lookout for Equipment.

" + } + }, "ModelStatus": { "base": null, "refs": { diff --git a/apis/medialive/2017-10-14/api-2.json b/apis/medialive/2017-10-14/api-2.json index 658008edd9f..a658733e851 100644 --- a/apis/medialive/2017-10-14/api-2.json +++ b/apis/medialive/2017-10-14/api-2.json @@ -2424,6 +2424,46 @@ "shape": "ConflictException" } ] + }, + "RestartChannelPipelines": { + "name": "RestartChannelPipelines", + "http": { + "method": "POST", + "requestUri": "/prod/channels/{channelId}/restartChannelPipelines", + "responseCode": 200 + }, + "input": { + "shape": "RestartChannelPipelinesRequest" + }, + "output": { + "shape": "RestartChannelPipelinesResponse" + }, + "errors": [ + { + "shape": "BadRequestException" + }, + { + "shape": "InternalServerErrorException" + }, + { + "shape": "ForbiddenException" + }, + { + "shape": "BadGatewayException" + }, + { + "shape": "NotFoundException" + }, + { + "shape": "GatewayTimeoutException" + }, + { + "shape": "TooManyRequestsException" + }, + { + "shape": "ConflictException" + } + ] } }, "shapes": { @@ -14913,22 +14953,26 @@ }, "InputDeviceThumbnail": { "type": "blob", - "streaming": true + "streaming": true, + "documentation": "The binary data for the thumbnail that the Link device has most recently sent to MediaLive." }, "AcceptHeader": { "type": "string", "enum": [ "image/jpeg" - ] + ], + "documentation": "The HTTP Accept header. Indicates the requested type fothe thumbnail." }, "ContentType": { "type": "string", "enum": [ "image/jpeg" - ] + ], + "documentation": "Specifies the media type of the thumbnail." 
}, "__timestamp": { - "type": "timestamp" + "type": "timestamp", + "documentation": "Placeholder documentation for __timestamp" }, "InputDeviceConfigurableAudioChannelPairConfig": { "type": "structure", @@ -14993,6 +15037,117 @@ "member": { "shape": "InputDeviceUhdAudioChannelPairConfig" } + }, + "ChannelPipelineIdToRestart": { + "type": "string", + "enum": [ + "PIPELINE_0", + "PIPELINE_1" + ] + }, + "RestartChannelPipelinesRequest": { + "type": "structure", + "members": { + "ChannelId": { + "shape": "__string", + "location": "uri", + "locationName": "channelId" + }, + "PipelineIds": { + "shape": "__listOfChannelPipelineIdToRestart", + "locationName": "pipelineIds" + } + }, + "required": [ + "ChannelId" + ] + }, + "RestartChannelPipelinesResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "__string", + "locationName": "arn" + }, + "CdiInputSpecification": { + "shape": "CdiInputSpecification", + "locationName": "cdiInputSpecification" + }, + "ChannelClass": { + "shape": "ChannelClass", + "locationName": "channelClass" + }, + "Destinations": { + "shape": "__listOfOutputDestination", + "locationName": "destinations" + }, + "EgressEndpoints": { + "shape": "__listOfChannelEgressEndpoint", + "locationName": "egressEndpoints" + }, + "EncoderSettings": { + "shape": "EncoderSettings", + "locationName": "encoderSettings" + }, + "Id": { + "shape": "__string", + "locationName": "id" + }, + "InputAttachments": { + "shape": "__listOfInputAttachment", + "locationName": "inputAttachments" + }, + "InputSpecification": { + "shape": "InputSpecification", + "locationName": "inputSpecification" + }, + "LogLevel": { + "shape": "LogLevel", + "locationName": "logLevel" + }, + "Maintenance": { + "shape": "MaintenanceStatus", + "locationName": "maintenance" + }, + "MaintenanceStatus": { + "shape": "__string", + "locationName": "maintenanceStatus" + }, + "Name": { + "shape": "__string", + "locationName": "name" + }, + "PipelineDetails": { + "shape": 
"__listOfPipelineDetail", + "locationName": "pipelineDetails" + }, + "PipelinesRunningCount": { + "shape": "__integer", + "locationName": "pipelinesRunningCount" + }, + "RoleArn": { + "shape": "__string", + "locationName": "roleArn" + }, + "State": { + "shape": "ChannelState", + "locationName": "state" + }, + "Tags": { + "shape": "Tags", + "locationName": "tags" + }, + "Vpc": { + "shape": "VpcOutputSettingsDescription", + "locationName": "vpc" + } + } + }, + "__listOfChannelPipelineIdToRestart": { + "type": "list", + "member": { + "shape": "ChannelPipelineIdToRestart" + } } } } diff --git a/apis/medialive/2017-10-14/docs-2.json b/apis/medialive/2017-10-14/docs-2.json index 4fe210fef53..308b1b58338 100644 --- a/apis/medialive/2017-10-14/docs-2.json +++ b/apis/medialive/2017-10-14/docs-2.json @@ -65,7 +65,8 @@ "UpdateInputSecurityGroup": "Update an Input Security Group's Whilelists.", "UpdateMultiplex": "Updates a multiplex.", "UpdateMultiplexProgram": "Update a program in a multiplex.", - "UpdateReservation": "Update reservation." + "UpdateReservation": "Update reservation.", + "RestartChannelPipelines": "Restart pipelines in one channel that is currently running." }, "shapes": { "AacCodingMode": { @@ -166,7 +167,7 @@ "AccessibilityType": { "base": "Accessibility Type", "refs": { - "CaptionDescription$Accessibility": "Indicates whether the caption track implements accessibility features such as written descriptions of spoken dialog, music, and sounds." + "CaptionDescription$Accessibility": "Indicates whether the caption track implements accessibility features such as written descriptions of spoken dialog, music, and sounds. This signaling is added to HLS output group and MediaPackage output group." 
} }, "AccountConfiguration": { @@ -596,7 +597,7 @@ } }, "CdiInputResolution": { - "base": "Maximum CDI input resolution; SD is 480i and 576i up to 30 frames-per-second (fps), HD is 720p up to 60 fps / 1080i up to 30 fps, FHD is 1080p up to 60 fps, UHD is 2160p up to 60 fps\n", + "base": "Maximum CDI input resolution; SD is 480i and 576i up to 30 frames-per-second (fps), HD is 720p up to 60 fps / 1080i up to 30 fps, FHD is 1080p up to 60 fps, UHD is 2160p up to 60 fps", "refs": { "CdiInputSpecification$Resolution": "Maximum CDI input resolution" } @@ -1752,7 +1753,7 @@ "InputClass": { "base": "A standard input has two sources and a single pipeline input only has one.", "refs": { - "Input$InputClass": "STANDARD - MediaLive expects two sources to be connected to this input. If the channel is also STANDARD, both sources will be ingested. If the channel is SINGLE_PIPELINE, only the first source will be ingested; the second source will always be ignored, even if the first source fails.\nSINGLE_PIPELINE - You can connect only one source to this input. If the ChannelClass is also SINGLE_PIPELINE, this value is valid. If the ChannelClass is STANDARD, this value is not valid because the channel requires two sources in the input.\n" + "Input$InputClass": "STANDARD - MediaLive expects two sources to be connected to this input. If the channel is also STANDARD, both sources will be ingested. If the channel is SINGLE_PIPELINE, only the first source will be ingested; the second source will always be ignored, even if the first source fails.\nSINGLE_PIPELINE - You can connect only one source to this input. If the ChannelClass is also SINGLE_PIPELINE, this value is valid. If the ChannelClass is STANDARD, this value is not valid because the channel requires two sources in the input." 
} }, "InputClippingSettings": { @@ -2005,7 +2006,7 @@ } }, "InputPreference": { - "base": "Input preference when deciding which input to make active when a previously failed input has recovered.\nIf \\\"EQUAL_INPUT_PREFERENCE\\\", then the active input will stay active as long as it is healthy.\nIf \\\"PRIMARY_INPUT_PREFERRED\\\", then always switch back to the primary input when it is healthy.\n", + "base": "Input preference when deciding which input to make active when a previously failed input has recovered.\nIf \\\"EQUAL_INPUT_PREFERENCE\\\", then the active input will stay active as long as it is healthy.\nIf \\\"PRIMARY_INPUT_PREFERRED\\\", then always switch back to the primary input when it is healthy.", "refs": { "AutomaticInputFailoverSettings$InputPreference": "Input preference when deciding which input to make active when a previously failed input has recovered." } @@ -2017,7 +2018,7 @@ } }, "InputResolution": { - "base": "Input resolution based on lines of vertical resolution in the input; SD is less than 720 lines, HD is 720 to 1080 lines, UHD is greater than 1080 lines\n", + "base": "Input resolution based on lines of vertical resolution in the input; SD is less than 720 lines, HD is 720 to 1080 lines, UHD is greater than 1080 lines", "refs": { "InputSpecification$Resolution": "Input resolution, categorized coarsely" } @@ -2066,9 +2067,9 @@ } }, "InputSourceType": { - "base": "There are two types of input sources, static and dynamic. If an input source is dynamic you can\nchange the source url of the input dynamically using an input switch action. Currently, two input types\nsupport a dynamic url at this time, MP4_FILE and TS_FILE. By default all input sources are static.\n", + "base": "There are two types of input sources, static and dynamic. If an input source is dynamic you can\nchange the source url of the input dynamically using an input switch action. Currently, two input types\nsupport a dynamic url at this time, MP4_FILE and TS_FILE. 
By default all input sources are static.", "refs": { - "Input$InputSourceType": "Certain pull input sources can be dynamic, meaning that they can have their URL's dynamically changes\nduring input switch actions. Presently, this functionality only works with MP4_FILE and TS_FILE inputs.\n" + "Input$InputSourceType": "Certain pull input sources can be dynamic, meaning that they can have their URL's dynamically changes\nduring input switch actions. Presently, this functionality only works with MP4_FILE and TS_FILE inputs." } }, "InputSpecification": { @@ -2106,7 +2107,7 @@ } }, "InputVpcRequest": { - "base": "Settings for a private VPC Input.\nWhen this property is specified, the input destination addresses will be created in a VPC rather than with public Internet addresses.\nThis property requires setting the roleArn property on Input creation.\nNot compatible with the inputSecurityGroups property.\n", + "base": "Settings for a private VPC Input.\nWhen this property is specified, the input destination addresses will be created in a VPC rather than with public Internet addresses.\nThis property requires setting the roleArn property on Input creation.\nNot compatible with the inputSecurityGroups property.", "refs": { "CreateInput$Vpc": null } @@ -2840,7 +2841,7 @@ } }, "PreferredChannelPipeline": { - "base": "Indicates which pipeline is preferred by the multiplex for program ingest.\nIf set to \\\"PIPELINE_0\\\" or \\\"PIPELINE_1\\\" and an unhealthy ingest causes the multiplex to switch to the non-preferred pipeline,\nit will switch back once that ingest is healthy again. 
If set to \\\"CURRENTLY_ACTIVE\\\",\nit will not switch back to the other pipeline based on it recovering to a healthy state,\nit will only switch if the active pipeline becomes unhealthy.\n", + "base": "Indicates which pipeline is preferred by the multiplex for program ingest.\nIf set to \\\"PIPELINE_0\\\" or \\\"PIPELINE_1\\\" and an unhealthy ingest causes the multiplex to switch to the non-preferred pipeline,\nit will switch back once that ingest is healthy again. If set to \\\"CURRENTLY_ACTIVE\\\",\nit will not switch back to the other pipeline based on it recovering to a healthy state,\nit will only switch if the active pipeline becomes unhealthy.", "refs": { "MultiplexProgramSettings$PreferredChannelPipeline": "Indicates which pipeline is preferred by the multiplex for program ingest." } @@ -2933,7 +2934,7 @@ } }, "ReservationResolution": { - "base": "Resolution based on lines of vertical resolution; SD is less than 720 lines, HD is 720 to 1080 lines, FHD is 1080 lines, UHD is greater than 1080 lines\n", + "base": "Resolution based on lines of vertical resolution; SD is less than 720 lines, HD is 720 to 1080 lines, FHD is 1080 lines, UHD is greater than 1080 lines", "refs": { "ReservationResourceSpecification$Resolution": "Resolution, e.g. 'HD'" } @@ -3663,13 +3664,13 @@ } }, "VpcOutputSettings": { - "base": "The properties for a private VPC Output\nWhen this property is specified, the output egress addresses will be created in a user specified VPC\n", + "base": "The properties for a private VPC Output\nWhen this property is specified, the output egress addresses will be created in a user specified VPC", "refs": { "CreateChannel$Vpc": "Settings for the VPC outputs" } }, "VpcOutputSettingsDescription": { - "base": "The properties for a private VPC Output\n", + "base": "The properties for a private VPC Output", "refs": { "Channel$Vpc": "Settings for VPC output", "ChannelSummary$Vpc": "Settings for any VPC outputs." 
@@ -4437,8 +4438,8 @@ "__listOfInputSourceRequest": { "base": null, "refs": { - "CreateInput$Sources": "The source URLs for a PULL-type input. Every PULL type input needs\nexactly two source URLs for redundancy.\nOnly specify sources for PULL type Inputs. Leave Destinations empty.\n", - "UpdateInput$Sources": "The source URLs for a PULL-type input. Every PULL type input needs\nexactly two source URLs for redundancy.\nOnly specify sources for PULL type Inputs. Leave Destinations empty.\n" + "CreateInput$Sources": "The source URLs for a PULL-type input. Every PULL type input needs\nexactly two source URLs for redundancy.\nOnly specify sources for PULL type Inputs. Leave Destinations empty.", + "UpdateInput$Sources": "The source URLs for a PULL-type input. Every PULL type input needs\nexactly two source URLs for redundancy.\nOnly specify sources for PULL type Inputs. Leave Destinations empty." } }, "__listOfInputWhitelistRule": { @@ -4462,8 +4463,8 @@ "__listOfMediaConnectFlowRequest": { "base": null, "refs": { - "CreateInput$MediaConnectFlows": "A list of the MediaConnect Flows that you want to use in this input. You can specify as few as one\nFlow and presently, as many as two. The only requirement is when you have more than one is that each Flow is in a\nseparate Availability Zone as this ensures your EML input is redundant to AZ issues.\n", - "UpdateInput$MediaConnectFlows": "A list of the MediaConnect Flow ARNs that you want to use as the source of the input. You can specify as few as one\nFlow and presently, as many as two. The only requirement is when you have more than one is that each Flow is in a\nseparate Availability Zone as this ensures your EML input is redundant to AZ issues.\n" + "CreateInput$MediaConnectFlows": "A list of the MediaConnect Flows that you want to use in this input. You can specify as few as one\nFlow and presently, as many as two. 
The only requirement is when you have more than one is that each Flow is in a\nseparate Availability Zone as this ensures your EML input is redundant to AZ issues.", + "UpdateInput$MediaConnectFlows": "A list of the MediaConnect Flow ARNs that you want to use as the source of the input. You can specify as few as one\nFlow and presently, as many as two. The only requirement is when you have more than one is that each Flow is in a\nseparate Availability Zone as this ensures your EML input is redundant to AZ issues." } }, "__listOfMediaPackageOutputDestinationSettings": { @@ -4511,8 +4512,8 @@ "__listOfOutputDestination": { "base": null, "refs": { - "Channel$Destinations": "A list of destinations of the channel. For UDP outputs, there is one\ndestination per output. For other types (HLS, for example), there is\none destination per packager.\n", - "ChannelSummary$Destinations": "A list of destinations of the channel. For UDP outputs, there is one\ndestination per output. For other types (HLS, for example), there is\none destination per packager.\n", + "Channel$Destinations": "A list of destinations of the channel. For UDP outputs, there is one\ndestination per output. For other types (HLS, for example), there is\none destination per packager.", + "ChannelSummary$Destinations": "A list of destinations of the channel. For UDP outputs, there is one\ndestination per output. For other types (HLS, for example), there is\none destination per packager.", "CreateChannel$Destinations": null, "UpdateChannel$Destinations": "A list of output destinations for this channel.", "UpdateChannelClass$Destinations": "A list of output destinations for this channel." @@ -4634,8 +4635,8 @@ "InputPrepareScheduleActionSettings$UrlPath": "The value for the variable portion of the URL for the dynamic input, for this instance of the input. 
Each time you use the same dynamic input in an input switch action, you can provide a different value, in order to connect the input to a different content source.", "InputSecurityGroup$Inputs": "The list of inputs currently using this Input Security Group.", "InputSwitchScheduleActionSettings$UrlPath": "The value for the variable portion of the URL for the dynamic input, for this instance of the input. Each time you use the same dynamic input in an input switch action, you can provide a different value, in order to connect the input to a different content source.", - "InputVpcRequest$SecurityGroupIds": "A list of up to 5 EC2 VPC security group IDs to attach to the Input VPC network interfaces.\nRequires subnetIds. If none are specified then the VPC default security group will be used.\n", - "InputVpcRequest$SubnetIds": "A list of 2 VPC subnet IDs from the same VPC.\nSubnet IDs must be mapped to two unique availability zones (AZ).\n", + "InputVpcRequest$SecurityGroupIds": "A list of up to 5 EC2 VPC security group IDs to attach to the Input VPC network interfaces.\nRequires subnetIds. 
If none are specified then the VPC default security group will be used.", + "InputVpcRequest$SubnetIds": "A list of 2 VPC subnet IDs from the same VPC.\nSubnet IDs must be mapped to two unique availability zones (AZ).", "Multiplex$AvailabilityZones": "A list of availability zones for the multiplex.", "MultiplexSummary$AvailabilityZones": "A list of availability zones for the multiplex.", "Output$AudioDescriptionNames": "The names of the AudioDescriptions used as audio sources for this output.", @@ -4643,13 +4644,13 @@ "StaticImageOutputActivateScheduleActionSettings$OutputNames": "The name(s) of the output(s) the activation should apply to.", "StaticImageOutputDeactivateScheduleActionSettings$OutputNames": "The name(s) of the output(s) the deactivation should apply to.", "UpdateInput$InputSecurityGroups": "A list of security groups referenced by IDs to attach to the input.", - "VpcOutputSettings$PublicAddressAllocationIds": "List of public address allocation ids to associate with ENIs that will be created in Output VPC.\nMust specify one for SINGLE_PIPELINE, two for STANDARD channels\n", - "VpcOutputSettings$SecurityGroupIds": "A list of up to 5 EC2 VPC security group IDs to attach to the Output VPC network interfaces.\nIf none are specified then the VPC default security group will be used\n", - "VpcOutputSettings$SubnetIds": "A list of VPC subnet IDs from the same VPC.\nIf STANDARD channel, subnet IDs must be mapped to two unique availability zones (AZ).\n", - "VpcOutputSettingsDescription$AvailabilityZones": "The Availability Zones where the vpc subnets are located.\nThe first Availability Zone applies to the first subnet in the list of subnets.\nThe second Availability Zone applies to the second subnet.\n", - "VpcOutputSettingsDescription$NetworkInterfaceIds": "A list of Elastic Network Interfaces created by MediaLive in the customer's VPC\n", - "VpcOutputSettingsDescription$SecurityGroupIds": "A list of up EC2 VPC security group IDs attached to the Output VPC 
network interfaces.\n", - "VpcOutputSettingsDescription$SubnetIds": "A list of VPC subnet IDs from the same VPC.\nIf STANDARD channel, subnet IDs must be mapped to two unique availability zones (AZ).\n" + "VpcOutputSettings$PublicAddressAllocationIds": "List of public address allocation ids to associate with ENIs that will be created in Output VPC.\nMust specify one for SINGLE_PIPELINE, two for STANDARD channels", + "VpcOutputSettings$SecurityGroupIds": "A list of up to 5 EC2 VPC security group IDs to attach to the Output VPC network interfaces.\nIf none are specified then the VPC default security group will be used", + "VpcOutputSettings$SubnetIds": "A list of VPC subnet IDs from the same VPC.\nIf STANDARD channel, subnet IDs must be mapped to two unique availability zones (AZ).", + "VpcOutputSettingsDescription$AvailabilityZones": "The Availability Zones where the vpc subnets are located.\nThe first Availability Zone applies to the first subnet in the list of subnets.\nThe second Availability Zone applies to the second subnet.", + "VpcOutputSettingsDescription$NetworkInterfaceIds": "A list of Elastic Network Interfaces created by MediaLive in the customer's VPC", + "VpcOutputSettingsDescription$SecurityGroupIds": "A list of up EC2 VPC security group IDs attached to the Output VPC network interfaces.", + "VpcOutputSettingsDescription$SubnetIds": "A list of VPC subnet IDs from the same VPC.\nIf STANDARD channel, subnet IDs must be mapped to two unique availability zones (AZ)." } }, "__longMin0Max1099511627775": { @@ -4720,17 +4721,17 @@ "ClaimDeviceRequest$Id": "The id of the device you want to claim.", "ColorCorrection$Uri": "The URI of the 3D LUT file. The protocol must be 's3:' or 's3ssl:':.", "CreateChannel$Name": "Name of channel.", - "CreateChannel$RequestId": "Unique request ID to be specified. This is needed to prevent retries from\ncreating multiple resources.\n", + "CreateChannel$RequestId": "Unique request ID to be specified. 
This is needed to prevent retries from\ncreating multiple resources.", "CreateChannel$Reserved": "Deprecated field that's only usable by whitelisted customers.", "CreateChannel$RoleArn": "An optional Amazon Resource Name (ARN) of the role to assume when running the Channel.", "CreateInput$Name": "Name of the input.", - "CreateInput$RequestId": "Unique identifier of the request to ensure the request is handled\nexactly once in case of retries.\n", + "CreateInput$RequestId": "Unique identifier of the request to ensure the request is handled\nexactly once in case of retries.", "CreateInput$RoleArn": "The Amazon Resource Name (ARN) of the role this input assumes during and after creation.", "CreateMultiplex$Name": "Name of multiplex.", - "CreateMultiplex$RequestId": "Unique request ID. This prevents retries from creating multiple\nresources.\n", + "CreateMultiplex$RequestId": "Unique request ID. This prevents retries from creating multiple\nresources.", "CreateMultiplexProgram$ProgramName": "Name of multiplex program.", - "CreateMultiplexProgram$RequestId": "Unique request ID. This prevents retries from creating multiple\nresources.\n", - "CreatePartnerInput$RequestId": "Unique identifier of the request to ensure the request is handled\nexactly once in case of retries.\n", + "CreateMultiplexProgram$RequestId": "Unique request ID. This prevents retries from creating multiple\nresources.", + "CreatePartnerInput$RequestId": "Unique identifier of the request to ensure the request is handled\nexactly once in case of retries.", "DvbSubDestinationSettings$FontSize": "When set to auto fontSize will scale depending on the size of the output. Giving a positive integer will specify the exact font size in points. All burn-in and DVB-Sub font settings must match.", "EbuTtDDestinationSettings$FontFamily": "Specifies the font family to include in the font data attached to the EBU-TT captions. Valid only if styleControl is set to include. 
If you leave this field empty, the font family is set to \"monospaced\". (If styleControl is set to exclude, the font family is always set to \"monospaced\".)\n\nYou specify only the font family. All other style information (color, bold, position and so on) is copied from the input captions. The size is always set to 100% to allow the downstream player to choose the size.\n\n- Enter a list of font families, as a comma-separated list of font names, in order of preference. The name can be a font family (such as \u201cArial\u201d), or a generic font family (such as \u201cserif\u201d), or \u201cdefault\u201d (to let the downstream player choose the font).\n- Leave blank to set the family to \u201cmonospace\u201d.", "EpochLockingSettings$CustomEpoch": "Optional. Enter a value here to use a custom epoch, instead of the standard epoch (which started at 1970-01-01T00:00:00 UTC). Specify the start time of the custom epoch, in YYYY-MM-DDTHH:MM:SS in UTC. The time must be 2000-01-01T00:00:00 or later. Always set the MM:SS portion to 00:00.", @@ -4760,15 +4761,12 @@ "Input$RoleArn": "The Amazon Resource Name (ARN) of the role this input assumes during and after creation.", "InputAttachment$InputAttachmentName": "User-specified name for the attachment. This is required if the user wants to use this input in an input switch action.", "InputAttachment$InputId": "The ID of the input", - "InputDestination$Ip": "The system-generated static IP address of endpoint.\nIt remains fixed for the lifetime of the input.\n", - "InputDestination$NetworkRouteCidr": "This is the networkRouteCidr of the on-prem push input. 
This is required for inputs of this type so that\nthe encoder can properly route in the input.\n", + "InputDestination$Ip": "The system-generated static IP address of endpoint.\nIt remains fixed for the lifetime of the input.", "InputDestination$Port": "The port number for the input.", - "InputDestination$Url": "This represents the endpoint that the customer stream will be\npushed to.\n", - "InputDestinationRequest$NetworkRouteCidr": "If the push input has an input location of ON-PREM it's a requirement to specify what the route of the input\nis going to be on the customer local network.\n", - "InputDestinationRequest$StaticIpAddress": "If the push input has an input location of ON-PREM it's a requirement to specify what the ip address\nof the input is going to be on the customer local network.\n", - "InputDestinationRequest$StreamName": "A unique name for the location the RTMP stream is being pushed\nto.\n", - "InputDestinationVpc$AvailabilityZone": "The availability zone of the Input destination.\n", - "InputDestinationVpc$NetworkInterfaceId": "The network interface ID of the Input destination in the VPC.\n", + "InputDestination$Url": "This represents the endpoint that the customer stream will be\npushed to.", + "InputDestinationRequest$StreamName": "A unique name for the location the RTMP stream is being pushed\nto.", + "InputDestinationVpc$AvailabilityZone": "The availability zone of the Input destination.", + "InputDestinationVpc$NetworkInterfaceId": "The network interface ID of the Input destination in the VPC.", "InputDevice$Arn": "The unique ARN of the input device.", "InputDevice$AvailabilityZone": "The Availability Zone associated with this input device.", "InputDevice$Id": "The unique ID of the input device.", @@ -4815,10 +4813,10 @@ "InputSecurityGroup$Arn": "Unique ARN of Input Security Group", "InputSecurityGroup$Id": "The Id of the Input Security Group", "InputSource$PasswordParam": "The key used to extract the password from EC2 Parameter store.", - 
"InputSource$Url": "This represents the customer's source URL where stream is\npulled from.\n", + "InputSource$Url": "This represents the customer's source URL where stream is\npulled from.", "InputSource$Username": "The username for the input source.", "InputSourceRequest$PasswordParam": "The key used to extract the password from EC2 Parameter store.", - "InputSourceRequest$Url": "This represents the customer's source URL where stream is\npulled from.\n", + "InputSourceRequest$Url": "This represents the customer's source URL where stream is\npulled from.", "InputSourceRequest$Username": "The username for the input source.", "InputSwitchScheduleActionSettings$InputAttachmentNameReference": "The name of the input attachment (not the name of the input!) to switch to. The name is specified in the channel configuration.", "InputWhitelistRule$Cidr": "The IPv4 CIDR that's whitelisted.", @@ -5099,6 +5097,24 @@ "refs": { "InputDeviceUhdSettings$AudioChannelPairs": "An array of eight audio configurations, one for each audio pair in the source. Each audio configuration specifies either to exclude the pair, or to format it and include it in the output from the UHD device. Applies only when the device is configured as the source for a MediaConnect flow." } + }, + "ChannelPipelineIdToRestart": { + "base": "Property of RestartChannelPipelinesRequest", + "refs": { + "__listOfChannelPipelineIdToRestart$member": null + } + }, + "RestartChannelPipelinesRequest": { + "base": null, + "refs": { + } + }, + "__listOfChannelPipelineIdToRestart": { + "base": null, + "refs": { + "RestartChannelPipelinesRequest$PipelineIds": "An array of pipelines to restart in this channel. Format PIPELINE_0 or PIPELINE_1." 
+ } } - } + }, + "documentation": "API for AWS Elemental MediaLive" } diff --git a/apis/ssm/2014-11-06/api-2.json b/apis/ssm/2014-11-06/api-2.json index b3da45a33f1..19a1d600c7a 100644 --- a/apis/ssm/2014-11-06/api-2.json +++ b/apis/ssm/2014-11-06/api-2.json @@ -390,7 +390,10 @@ "errors":[ {"shape":"InternalServerError"}, {"shape":"ResourcePolicyInvalidParameterException"}, - {"shape":"ResourcePolicyConflictException"} + {"shape":"ResourcePolicyConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"MalformedResourcePolicyDocumentException"}, + {"shape":"ResourcePolicyNotFoundException"} ] }, "DeregisterManagedInstance":{ @@ -1235,7 +1238,8 @@ "output":{"shape":"GetResourcePoliciesResponse"}, "errors":[ {"shape":"InternalServerError"}, - {"shape":"ResourcePolicyInvalidParameterException"} + {"shape":"ResourcePolicyInvalidParameterException"}, + {"shape":"ResourceNotFoundException"} ] }, "GetServiceSetting":{ @@ -1593,7 +1597,10 @@ {"shape":"InternalServerError"}, {"shape":"ResourcePolicyInvalidParameterException"}, {"shape":"ResourcePolicyLimitExceededException"}, - {"shape":"ResourcePolicyConflictException"} + {"shape":"ResourcePolicyConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"MalformedResourcePolicyDocumentException"}, + {"shape":"ResourcePolicyNotFoundException"} ] }, "RegisterDefaultPatchBaseline":{ @@ -4479,7 +4486,11 @@ "shape":"MaxResults", "box":true }, - "NextToken":{"shape":"NextToken"} + "NextToken":{"shape":"NextToken"}, + "Shared":{ + "shape":"Boolean", + "box":true + } } }, "DescribeParametersResult":{ @@ -7430,6 +7441,13 @@ "type":"list", "member":{"shape":"MaintenanceWindowIdentityForTarget"} }, + "MalformedResourcePolicyDocumentException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "exception":true + }, "ManagedInstanceId":{ "type":"string", "max":124, @@ -8401,6 +8419,7 @@ "type":"structure", "members":{ "Name":{"shape":"PSParameterName"}, + "ARN":{"shape":"String"}, 
"Type":{"shape":"ParameterType"}, "KeyId":{"shape":"ParameterKeyId"}, "LastModifiedDate":{"shape":"DateTime"}, @@ -9001,7 +9020,7 @@ }, "Policy":{ "type":"string", - "pattern":"\\S+" + "pattern":"^(?!\\s*$).+" }, "PolicyHash":{"type":"string"}, "PolicyId":{"type":"string"}, @@ -9560,6 +9579,13 @@ }, "exception":true }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "exception":true + }, "ResourcePolicyConflictException":{ "type":"structure", "members":{ @@ -9589,6 +9615,13 @@ "max":50, "min":1 }, + "ResourcePolicyNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "exception":true + }, "ResourcePolicyParameterNamesList":{ "type":"list", "member":{"shape":"String"} diff --git a/apis/ssm/2014-11-06/docs-2.json b/apis/ssm/2014-11-06/docs-2.json index dcae74eeaf2..03ef4f80f17 100644 --- a/apis/ssm/2014-11-06/docs-2.json +++ b/apis/ssm/2014-11-06/docs-2.json @@ -26,7 +26,7 @@ "DeleteParameters": "

Delete a list of parameters. After deleting a parameter, wait for at least 30 seconds to create a parameter with the same name.

", "DeletePatchBaseline": "

Deletes a patch baseline.

", "DeleteResourceDataSync": "

Deletes a resource data sync configuration. After the configuration is deleted, changes to data on managed nodes are no longer synced to or from the target. Deleting a sync configuration doesn't delete data.

", - "DeleteResourcePolicy": "

Deletes a Systems Manager resource policy. A resource policy helps you to define the IAM entity (for example, an Amazon Web Services account) that can manage your Systems Manager resources. Currently, OpsItemGroup is the only resource that supports Systems Manager resource policies. The resource policy for OpsItemGroup enables Amazon Web Services accounts to view and interact with OpsCenter operational work items (OpsItems).

", + "DeleteResourcePolicy": "

Deletes a Systems Manager resource policy. A resource policy helps you to define the IAM entity (for example, an Amazon Web Services account) that can manage your Systems Manager resources. The following resources support Systems Manager resource policies.

  • OpsItemGroup - The resource policy for OpsItemGroup enables Amazon Web Services accounts to view and interact with OpsCenter operational work items (OpsItems).

  • Parameter - The resource policy is used to share a parameter with other accounts using Resource Access Manager (RAM). For more information about cross-account sharing of parameters, see Working with shared parameters in the Amazon Web Services Systems Manager User Guide.

", "DeregisterManagedInstance": "

Removes the server or virtual machine from the list of registered servers. You can reregister the node again at any time. If you don't plan to use Run Command on the server, we suggest uninstalling SSM Agent first.

", "DeregisterPatchBaselineForPatchGroup": "

Removes a patch group from a patch baseline.

", "DeregisterTargetFromMaintenanceWindow": "

Removes a target from a maintenance window.

", @@ -57,7 +57,7 @@ "DescribeMaintenanceWindows": "

Retrieves the maintenance windows in an Amazon Web Services account.

", "DescribeMaintenanceWindowsForTarget": "

Retrieves information about the maintenance window targets or tasks that a managed node is associated with.

", "DescribeOpsItems": "

Query a set of OpsItems. You must have permission in Identity and Access Management (IAM) to query a list of OpsItems. For more information, see Set up OpsCenter in the Amazon Web Services Systems Manager User Guide.

Operations engineers and IT professionals use Amazon Web Services Systems Manager OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their Amazon Web Services resources. For more information, see OpsCenter in the Amazon Web Services Systems Manager User Guide.

", - "DescribeParameters": "

Get information about a parameter.

Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results.

If you change the KMS key alias for the KMS key used to encrypt a parameter, then you must also update the key alias the parameter uses to reference KMS. Otherwise, DescribeParameters retrieves whatever the original key alias was referencing.

", + "DescribeParameters": "

Lists the parameters in your Amazon Web Services account or the parameters shared with you when you enable the Shared option.

Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results.

If you change the KMS key alias for the KMS key used to encrypt a parameter, then you must also update the key alias the parameter uses to reference KMS. Otherwise, DescribeParameters retrieves whatever the original key alias was referencing.

", "DescribePatchBaselines": "

Lists the patch baselines in your Amazon Web Services account.

", "DescribePatchGroupState": "

Returns high-level aggregated patch compliance state information for a patch group.

", "DescribePatchGroups": "

Lists all patch groups that have been registered with patch baselines.

", @@ -110,7 +110,7 @@ "PutComplianceItems": "

Registers a compliance type and other compliance details on a designated resource. This operation lets you register custom compliance details with a resource. This call overwrites existing compliance information on the resource, so you must provide a full list of compliance items each time that you send the request.

ComplianceType can be one of the following:

  • ExecutionId: The execution ID when the patch, association, or custom compliance item was applied.

  • ExecutionType: Specify patch, association, or Custom:string.

  • ExecutionTime: The time the patch, association, or custom compliance item was applied to the managed node.

  • Id: The patch, association, or custom compliance ID.

  • Title: A title.

  • Status: The status of the compliance item. For example, approved for patches, or Failed for associations.

  • Severity: A patch severity. For example, Critical.

  • DocumentName: An SSM document name. For example, AWS-RunPatchBaseline.

  • DocumentVersion: An SSM document version number. For example, 4.

  • Classification: A patch classification. For example, security updates.

  • PatchBaselineId: A patch baseline ID.

  • PatchSeverity: A patch severity. For example, Critical.

  • PatchState: A patch state. For example, InstancesWithFailedPatches.

  • PatchGroup: The name of a patch group.

  • InstalledTime: The time the association, patch, or custom compliance item was applied to the resource. Specify the time by using the following format: yyyy-MM-dd'T'HH:mm:ss'Z'

", "PutInventory": "

Bulk update custom inventory items on one or more managed nodes. The request adds an inventory item, if it doesn't already exist, or updates an inventory item, if it does exist.

", "PutParameter": "

Add a parameter to the system.

", - "PutResourcePolicy": "

Creates or updates a Systems Manager resource policy. A resource policy helps you to define the IAM entity (for example, an Amazon Web Services account) that can manage your Systems Manager resources. Currently, OpsItemGroup is the only resource that supports Systems Manager resource policies. The resource policy for OpsItemGroup enables Amazon Web Services accounts to view and interact with OpsCenter operational work items (OpsItems).

", + "PutResourcePolicy": "

Creates or updates a Systems Manager resource policy. A resource policy helps you to define the IAM entity (for example, an Amazon Web Services account) that can manage your Systems Manager resources. The following resources support Systems Manager resource policies.

  • OpsItemGroup - The resource policy for OpsItemGroup enables Amazon Web Services accounts to view and interact with OpsCenter operational work items (OpsItems).

  • Parameter - The resource policy is used to share a parameter with other accounts using Resource Access Manager (RAM).

    To share a parameter, it must be in the advanced parameter tier. For information about parameter tiers, see Managing parameter tiers. For information about changing an existing standard parameter to an advanced parameter, see Changing a standard parameter to an advanced parameter.

    To share a SecureString parameter, it must be encrypted with a customer managed key, and you must share the key separately through Key Management Service. Amazon Web Services managed keys cannot be shared. Parameters encrypted with the default Amazon Web Services managed key can be updated to use a customer managed key instead. For KMS key definitions, see KMS concepts in the Key Management Service Developer Guide.

    While you can share a parameter using the Systems Manager PutResourcePolicy operation, we recommend using Resource Access Manager (RAM) instead. This is because using PutResourcePolicy requires the extra step of promoting the parameter to a standard RAM Resource Share using the RAM PromoteResourceShareCreatedFromPolicy API operation. Otherwise, the parameter won't be returned by the Systems Manager DescribeParameters API operation using the --shared option.

    For more information, see Sharing a parameter in the Amazon Web Services Systems Manager User Guide.

", "RegisterDefaultPatchBaseline": "

Defines the default patch baseline for the relevant operating system.

To reset the Amazon Web Services-predefined patch baseline as the default, specify the full patch baseline Amazon Resource Name (ARN) as the baseline ID value. For example, for CentOS, specify arn:aws:ssm:us-east-2:733109147000:patchbaseline/pb-0574b43a65ea646ed instead of pb-0574b43a65ea646ed.

", "RegisterPatchBaselineForPatchGroup": "

Registers a patch baseline for a patch group.

", "RegisterTargetWithMaintenanceWindow": "

Registers a target with a maintenance window.

", @@ -968,6 +968,7 @@ "DeleteDocumentRequest$Force": "

Some SSM document types require that you specify a Force flag before you can delete the document. For example, you must specify a Force flag to delete a document of type ApplicationConfigurationSchema. You can restrict access to the Force flag in an Identity and Access Management (IAM) policy.

", "DeregisterTargetFromMaintenanceWindowRequest$Safe": "

The system checks if the target is being referenced by a task. If the target is being referenced, the system returns an error and doesn't deregister the target from the maintenance window.

", "DescribeAutomationStepExecutionsRequest$ReverseOrder": "

Indicates whether to list step executions in reverse order by start time. The default value is 'false'.

", + "DescribeParametersRequest$Shared": "

Lists parameters that are shared with you.

By default when using this option, the command returns parameters that have been shared using a standard Resource Access Manager Resource Share. In order for a parameter that was shared using the PutResourcePolicy command to be returned, the associated RAM Resource Share Created From Policy must have been promoted to a standard Resource Share using the RAM PromoteResourceShareCreatedFromPolicy API operation.

For more information about sharing parameters, see Working with shared parameters in the Amazon Web Services Systems Manager User Guide.

", "DocumentVersionInfo$IsDefaultVersion": "

An identifier for the default version of the document.

", "GetParameterHistoryRequest$WithDecryption": "

Return decrypted values for secure string parameters. This flag is ignored for String and StringList parameter types.

", "GetParameterRequest$WithDecryption": "

Return decrypted values for secure string parameters. This flag is ignored for String and StringList parameter types.

", @@ -4718,6 +4719,11 @@ "DescribeMaintenanceWindowsForTargetResult$WindowIdentities": "

Information about the maintenance window targets and tasks a managed node is associated with.

" } }, + "MalformedResourcePolicyDocumentException": { + "base": "

The specified policy document is malformed or invalid, or excessive PutResourcePolicy or DeleteResourcePolicy calls have been made.

", + "refs": { + } + }, "ManagedInstanceId": { "base": null, "refs": { @@ -5699,17 +5705,17 @@ "PSParameterName": { "base": null, "refs": { - "DeleteParameterRequest$Name": "

The name of the parameter to delete.

", - "GetParameterHistoryRequest$Name": "

The name of the parameter for which you want to review history.

", - "GetParameterRequest$Name": "

The name of the parameter you want to query.

To query by parameter label, use \"Name\": \"name:label\". To query by parameter version, use \"Name\": \"name:version\".

", + "DeleteParameterRequest$Name": "

The name of the parameter to delete.

You can't enter the Amazon Resource Name (ARN) for a parameter, only the parameter name itself.

", + "GetParameterHistoryRequest$Name": "

The name or Amazon Resource Name (ARN) of the parameter for which you want to review history. For parameters shared with you from another account, you must use the full ARN.

", + "GetParameterRequest$Name": "

The name or Amazon Resource Name (ARN) of the parameter that you want to query. For parameters shared with you from another account, you must use the full ARN.

To query by parameter label, use \"Name\": \"name:label\". To query by parameter version, use \"Name\": \"name:version\".

For more information about shared parameters, see Working with shared parameters in the Amazon Web Services Systems Manager User Guide.

", "GetParametersByPathRequest$Path": "

The hierarchy for the parameter. Hierarchies start with a forward slash (/). The hierarchy is the parameter name except the last part of the parameter. For the API call to succeed, the last part of the parameter name can't be in the path. A parameter name hierarchy can have a maximum of 15 levels. Here is an example of a hierarchy: /Finance/Prod/IAD/WinServ2016/license33

", - "LabelParameterVersionRequest$Name": "

The parameter name on which you want to attach one or more labels.

", + "LabelParameterVersionRequest$Name": "

The parameter name on which you want to attach one or more labels.

You can't enter the Amazon Resource Name (ARN) for a parameter, only the parameter name itself.

", "Parameter$Name": "

The name of the parameter.

", "ParameterHistory$Name": "

The name of the parameter.

", "ParameterMetadata$Name": "

The parameter name.

", "ParameterNameList$member": null, - "PutParameterRequest$Name": "

The fully qualified name of the parameter that you want to add to the system. The fully qualified name includes the complete hierarchy of the parameter path and name. For parameters in a hierarchy, you must include a leading forward slash character (/) when you create or reference a parameter. For example: /Dev/DBServer/MySQL/db-string13

Naming Constraints:

  • Parameter names are case sensitive.

  • A parameter name must be unique within an Amazon Web Services Region

  • A parameter name can't be prefixed with \"aws\" or \"ssm\" (case-insensitive).

  • Parameter names can include only the following symbols and letters: a-zA-Z0-9_.-

    In addition, the slash character ( / ) is used to delineate hierarchies in parameter names. For example: /Dev/Production/East/Project-ABC/MyParameter

  • A parameter name can't include spaces.

  • Parameter hierarchies are limited to a maximum depth of fifteen levels.

For additional information about valid values for parameter names, see Creating Systems Manager parameters in the Amazon Web Services Systems Manager User Guide.

The maximum length constraint of 2048 characters listed below includes 1037 characters reserved for internal use by Systems Manager. The maximum length for a parameter name that you create is 1011 characters. This includes the characters in the ARN that precede the name you specify, such as arn:aws:ssm:us-east-2:111122223333:parameter/.

", - "UnlabelParameterVersionRequest$Name": "

The name of the parameter from which you want to delete one or more labels.

" + "PutParameterRequest$Name": "

The fully qualified name of the parameter that you want to add to the system.

You can't enter the Amazon Resource Name (ARN) for a parameter, only the parameter name itself.

The fully qualified name includes the complete hierarchy of the parameter path and name. For parameters in a hierarchy, you must include a leading forward slash character (/) when you create or reference a parameter. For example: /Dev/DBServer/MySQL/db-string13

Naming Constraints:

  • Parameter names are case sensitive.

  • A parameter name must be unique within an Amazon Web Services Region.

  • A parameter name can't be prefixed with \"aws\" or \"ssm\" (case-insensitive).

  • Parameter names can include only the following symbols and letters: a-zA-Z0-9_.-

    In addition, the slash character ( / ) is used to delineate hierarchies in parameter names. For example: /Dev/Production/East/Project-ABC/MyParameter

  • A parameter name can't include spaces.

  • Parameter hierarchies are limited to a maximum depth of fifteen levels.

For additional information about valid values for parameter names, see Creating Systems Manager parameters in the Amazon Web Services Systems Manager User Guide.

The maximum length constraint of 2048 characters listed below includes 1037 characters reserved for internal use by Systems Manager. The maximum length for a parameter name that you create is 1011 characters. This includes the characters in the ARN that precede the name you specify, such as arn:aws:ssm:us-east-2:111122223333:parameter/.

", + "UnlabelParameterVersionRequest$Name": "

The name of the parameter from which you want to delete one or more labels.

You can't enter the Amazon Resource Name (ARN) for a parameter, only the parameter name itself.

" } }, "PSParameterSelector": { @@ -5828,7 +5834,7 @@ } }, "ParameterMetadata": { - "base": "

Metadata includes information like the ARN of the last user and the date/time the parameter was last used.

", + "base": "

Metadata includes information like the Amazon Resource Name (ARN) of the last user to update the parameter and the date and time the parameter was last used.

", "refs": { "ParameterMetadataList$member": null } @@ -5848,10 +5854,10 @@ "ParameterNameList": { "base": null, "refs": { - "DeleteParametersRequest$Names": "

The names of the parameters to delete. After deleting a parameter, wait for at least 30 seconds to create a parameter with the same name.

", + "DeleteParametersRequest$Names": "

The names of the parameters to delete. After deleting a parameter, wait for at least 30 seconds to create a parameter with the same name.

You can't enter the Amazon Resource Name (ARN) for a parameter, only the parameter name itself.

", "DeleteParametersResult$DeletedParameters": "

The names of the deleted parameters.

", "DeleteParametersResult$InvalidParameters": "

The names of parameters that weren't deleted because the parameters aren't valid.

", - "GetParametersRequest$Names": "

Names of the parameters for which you want to query information.

To query by parameter label, use \"Name\": \"name:label\". To query by parameter version, use \"Name\": \"name:version\".

", + "GetParametersRequest$Names": "

The names or Amazon Resource Names (ARNs) of the parameters that you want to query. For parameters shared with you from another account, you must use the full ARNs.

To query by parameter label, use \"Name\": \"name:label\". To query by parameter version, use \"Name\": \"name:version\".

For more information about shared parameters, see Working with shared parameters in the Amazon Web Services Systems Manager User Guide.

", "GetParametersResult$InvalidParameters": "

A list of parameters that aren't formatted correctly or don't run during an execution.

" } }, @@ -7061,6 +7067,11 @@ "refs": { } }, + "ResourceNotFoundException": { + "base": "

The specified parameter to be shared could not be found.

", + "refs": { + } + }, "ResourcePolicyConflictException": { "base": "

The hash provided in the call doesn't match the stored hash. This exception is thrown when trying to update an obsolete policy version or when multiple requests to update a policy are sent.

", "refs": { @@ -7082,6 +7093,11 @@ "GetResourcePoliciesRequest$MaxResults": "

The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results.

" } }, + "ResourcePolicyNotFoundException": { + "base": "

No policies with the specified policy ID and hash could be found.

", + "refs": { + } + }, "ResourcePolicyParameterNamesList": { "base": null, "refs": { @@ -7759,6 +7775,7 @@ "ListOpsItemEventsResponse$NextToken": "

The token for the next set of items to return. Use this token to get the next set of results.

", "ListOpsItemRelatedItemsRequest$NextToken": "

The token for the next set of items to return. (You received this token from a previous call.)

", "ListOpsItemRelatedItemsResponse$NextToken": "

The token for the next set of items to return. Use this token to get the next set of results.

", + "MalformedResourcePolicyDocumentException$Message": null, "MaxDocumentSizeExceeded$Message": null, "NormalStringMap$key": null, "NormalStringMap$value": null, @@ -7802,6 +7819,7 @@ "ParameterInlinePolicy$PolicyStatus": "

The status of the policy. Policies report the following statuses: Pending (the policy hasn't been enforced or applied yet), Finished (the policy was applied), Failed (the policy wasn't applied), or InProgress (the policy is being applied now).

", "ParameterLimitExceeded$message": null, "ParameterMaxVersionLimitExceeded$message": null, + "ParameterMetadata$ARN": "

The Amazon Resource Name (ARN) of the parameter.

", "ParameterMetadata$LastModifiedUser": "

Amazon Resource Name (ARN) of the Amazon Web Services user who last changed the parameter.

", "ParameterNotFound$message": null, "ParameterPatternMismatchException$message": "

The parameter name isn't valid.

", @@ -7818,10 +7836,12 @@ "ResourceDataSyncNotFoundException$Message": null, "ResourceInUseException$Message": null, "ResourceLimitExceededException$Message": null, + "ResourceNotFoundException$Message": null, "ResourcePolicyConflictException$Message": null, "ResourcePolicyInvalidParameterException$Message": null, "ResourcePolicyLimitExceededException$LimitType": null, "ResourcePolicyLimitExceededException$Message": null, + "ResourcePolicyNotFoundException$Message": null, "ResourcePolicyParameterNamesList$member": null, "ServiceSetting$LastModifiedUser": "

The ARN of the last modified user. This field is populated only if the setting value was overwritten.

", "ServiceSetting$ARN": "

The ARN of the service setting.

", diff --git a/gems/aws-sdk-iotevents/CHANGELOG.md b/gems/aws-sdk-iotevents/CHANGELOG.md index 1f08449733e..bf7be42a543 100644 --- a/gems/aws-sdk-iotevents/CHANGELOG.md +++ b/gems/aws-sdk-iotevents/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.46.0 (2024-02-21) +------------------ + +* Feature - Increase the maximum length of descriptions for Inputs, Detector Models, and Alarm Models + 1.45.0 (2024-01-26) ------------------ diff --git a/gems/aws-sdk-iotevents/VERSION b/gems/aws-sdk-iotevents/VERSION index 50aceaa7b71..0a3db35b241 100644 --- a/gems/aws-sdk-iotevents/VERSION +++ b/gems/aws-sdk-iotevents/VERSION @@ -1 +1 @@ -1.45.0 +1.46.0 diff --git a/gems/aws-sdk-iotevents/lib/aws-sdk-iotevents.rb b/gems/aws-sdk-iotevents/lib/aws-sdk-iotevents.rb index 15e9f356f3b..6d6263eac14 100644 --- a/gems/aws-sdk-iotevents/lib/aws-sdk-iotevents.rb +++ b/gems/aws-sdk-iotevents/lib/aws-sdk-iotevents.rb @@ -52,6 +52,6 @@ # @!group service module Aws::IoTEvents - GEM_VERSION = '1.45.0' + GEM_VERSION = '1.46.0' end diff --git a/gems/aws-sdk-iotevents/lib/aws-sdk-iotevents/client.rb b/gems/aws-sdk-iotevents/lib/aws-sdk-iotevents/client.rb index 9d25079c715..395e350635b 100644 --- a/gems/aws-sdk-iotevents/lib/aws-sdk-iotevents/client.rb +++ b/gems/aws-sdk-iotevents/lib/aws-sdk-iotevents/client.rb @@ -3444,7 +3444,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-iotevents' - context[:gem_version] = '1.45.0' + context[:gem_version] = '1.46.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-iotevents/lib/aws-sdk-iotevents/endpoint_provider.rb b/gems/aws-sdk-iotevents/lib/aws-sdk-iotevents/endpoint_provider.rb index 4c9bf4df196..353b4ab8301 100644 --- a/gems/aws-sdk-iotevents/lib/aws-sdk-iotevents/endpoint_provider.rb +++ b/gems/aws-sdk-iotevents/lib/aws-sdk-iotevents/endpoint_provider.rb @@ -32,7 +32,7 @@ def resolve_endpoint(parameters) raise ArgumentError, "FIPS 
and DualStack are enabled, but this partition does not support one or both" end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) - if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS")) + if Aws::Endpoints::Matchers.boolean_equals?(Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS"), true) return Aws::Endpoints::Endpoint.new(url: "https://iotevents-fips.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {}) end raise ArgumentError, "FIPS is enabled but this partition does not support FIPS" diff --git a/gems/aws-sdk-lookoutequipment/CHANGELOG.md b/gems/aws-sdk-lookoutequipment/CHANGELOG.md index 55bf9f77fa2..a84d2aad1cf 100644 --- a/gems/aws-sdk-lookoutequipment/CHANGELOG.md +++ b/gems/aws-sdk-lookoutequipment/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.29.0 (2024-02-21) +------------------ + +* Feature - This release adds a field exposing model quality to read APIs for models. It also adds a model quality field to the API response when creating an inference scheduler. 
+ 1.28.0 (2024-02-14) ------------------ diff --git a/gems/aws-sdk-lookoutequipment/VERSION b/gems/aws-sdk-lookoutequipment/VERSION index cfc730712d5..5e57fb89558 100644 --- a/gems/aws-sdk-lookoutequipment/VERSION +++ b/gems/aws-sdk-lookoutequipment/VERSION @@ -1 +1 @@ -1.28.0 +1.29.0 diff --git a/gems/aws-sdk-lookoutequipment/lib/aws-sdk-lookoutequipment.rb b/gems/aws-sdk-lookoutequipment/lib/aws-sdk-lookoutequipment.rb index b29df7cd16d..8be26ee7bb9 100644 --- a/gems/aws-sdk-lookoutequipment/lib/aws-sdk-lookoutequipment.rb +++ b/gems/aws-sdk-lookoutequipment/lib/aws-sdk-lookoutequipment.rb @@ -52,6 +52,6 @@ # @!group service module Aws::LookoutEquipment - GEM_VERSION = '1.28.0' + GEM_VERSION = '1.29.0' end diff --git a/gems/aws-sdk-lookoutequipment/lib/aws-sdk-lookoutequipment/client.rb b/gems/aws-sdk-lookoutequipment/lib/aws-sdk-lookoutequipment/client.rb index 51a83b490b5..47d344afd4f 100644 --- a/gems/aws-sdk-lookoutequipment/lib/aws-sdk-lookoutequipment/client.rb +++ b/gems/aws-sdk-lookoutequipment/lib/aws-sdk-lookoutequipment/client.rb @@ -541,6 +541,7 @@ def create_dataset(params = {}, options = {}) # * {Types::CreateInferenceSchedulerResponse#inference_scheduler_arn #inference_scheduler_arn} => String # * {Types::CreateInferenceSchedulerResponse#inference_scheduler_name #inference_scheduler_name} => String # * {Types::CreateInferenceSchedulerResponse#status #status} => String + # * {Types::CreateInferenceSchedulerResponse#model_quality #model_quality} => String # # @example Request syntax with placeholder values # @@ -583,6 +584,7 @@ def create_dataset(params = {}, options = {}) # resp.inference_scheduler_arn #=> String # resp.inference_scheduler_name #=> String # resp.status #=> String, one of "PENDING", "RUNNING", "STOPPING", "STOPPED" + # resp.model_quality #=> String, one of "QUALITY_THRESHOLD_MET", "CANNOT_DETERMINE_QUALITY", "POOR_QUALITY_DETECTED" # # @see http://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/CreateInferenceScheduler 
AWS API Documentation # @@ -1496,6 +1498,7 @@ def describe_label_group(params = {}, options = {}) # * {Types::DescribeModelResponse#accumulated_inference_data_end_time #accumulated_inference_data_end_time} => Time # * {Types::DescribeModelResponse#retraining_scheduler_status #retraining_scheduler_status} => String # * {Types::DescribeModelResponse#model_diagnostics_output_configuration #model_diagnostics_output_configuration} => Types::ModelDiagnosticsOutputConfiguration + # * {Types::DescribeModelResponse#model_quality #model_quality} => String # # @example Request syntax with placeholder values # @@ -1550,6 +1553,7 @@ def describe_label_group(params = {}, options = {}) # resp.model_diagnostics_output_configuration.s3_output_configuration.bucket #=> String # resp.model_diagnostics_output_configuration.s3_output_configuration.prefix #=> String # resp.model_diagnostics_output_configuration.kms_key_id #=> String + # resp.model_quality #=> String, one of "QUALITY_THRESHOLD_MET", "CANNOT_DETERMINE_QUALITY", "POOR_QUALITY_DETECTED" # # @see http://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/DescribeModel AWS API Documentation # @@ -1604,6 +1608,7 @@ def describe_model(params = {}, options = {}) # * {Types::DescribeModelVersionResponse#auto_promotion_result_reason #auto_promotion_result_reason} => String # * {Types::DescribeModelVersionResponse#model_diagnostics_output_configuration #model_diagnostics_output_configuration} => Types::ModelDiagnosticsOutputConfiguration # * {Types::DescribeModelVersionResponse#model_diagnostics_results_object #model_diagnostics_results_object} => Types::S3Object + # * {Types::DescribeModelVersionResponse#model_quality #model_quality} => String # # @example Request syntax with placeholder values # @@ -1653,6 +1658,7 @@ def describe_model(params = {}, options = {}) # resp.model_diagnostics_output_configuration.kms_key_id #=> String # resp.model_diagnostics_results_object.bucket #=> String # 
resp.model_diagnostics_results_object.key #=> String + # resp.model_quality #=> String, one of "QUALITY_THRESHOLD_MET", "CANNOT_DETERMINE_QUALITY", "POOR_QUALITY_DETECTED" # # @see http://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/DescribeModelVersion AWS API Documentation # @@ -2401,6 +2407,7 @@ def list_labels(params = {}, options = {}) # resp.model_version_summaries[0].created_at #=> Time # resp.model_version_summaries[0].status #=> String, one of "IN_PROGRESS", "SUCCESS", "FAILED", "IMPORT_IN_PROGRESS", "CANCELED" # resp.model_version_summaries[0].source_type #=> String, one of "TRAINING", "RETRAINING", "IMPORT" + # resp.model_version_summaries[0].model_quality #=> String, one of "QUALITY_THRESHOLD_MET", "CANNOT_DETERMINE_QUALITY", "POOR_QUALITY_DETECTED" # # @see http://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/ListModelVersions AWS API Documentation # @@ -2468,6 +2475,7 @@ def list_model_versions(params = {}, options = {}) # resp.model_summaries[0].model_diagnostics_output_configuration.s3_output_configuration.bucket #=> String # resp.model_summaries[0].model_diagnostics_output_configuration.s3_output_configuration.prefix #=> String # resp.model_summaries[0].model_diagnostics_output_configuration.kms_key_id #=> String + # resp.model_summaries[0].model_quality #=> String, one of "QUALITY_THRESHOLD_MET", "CANNOT_DETERMINE_QUALITY", "POOR_QUALITY_DETECTED" # # @see http://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/ListModels AWS API Documentation # @@ -3294,7 +3302,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-lookoutequipment' - context[:gem_version] = '1.28.0' + context[:gem_version] = '1.29.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-lookoutequipment/lib/aws-sdk-lookoutequipment/client_api.rb b/gems/aws-sdk-lookoutequipment/lib/aws-sdk-lookoutequipment/client_api.rb index 229204456a9..78ce728d330 100644 
--- a/gems/aws-sdk-lookoutequipment/lib/aws-sdk-lookoutequipment/client_api.rb +++ b/gems/aws-sdk-lookoutequipment/lib/aws-sdk-lookoutequipment/client_api.rb @@ -165,6 +165,7 @@ module ClientApi ModelMetrics = Shapes::StringShape.new(name: 'ModelMetrics') ModelName = Shapes::StringShape.new(name: 'ModelName') ModelPromoteMode = Shapes::StringShape.new(name: 'ModelPromoteMode') + ModelQuality = Shapes::StringShape.new(name: 'ModelQuality') ModelStatus = Shapes::StringShape.new(name: 'ModelStatus') ModelSummaries = Shapes::ListShape.new(name: 'ModelSummaries') ModelSummary = Shapes::StructureShape.new(name: 'ModelSummary') @@ -273,6 +274,7 @@ module ClientApi CreateInferenceSchedulerResponse.add_member(:inference_scheduler_arn, Shapes::ShapeRef.new(shape: InferenceSchedulerArn, location_name: "InferenceSchedulerArn")) CreateInferenceSchedulerResponse.add_member(:inference_scheduler_name, Shapes::ShapeRef.new(shape: InferenceSchedulerName, location_name: "InferenceSchedulerName")) CreateInferenceSchedulerResponse.add_member(:status, Shapes::ShapeRef.new(shape: InferenceSchedulerStatus, location_name: "Status")) + CreateInferenceSchedulerResponse.add_member(:model_quality, Shapes::ShapeRef.new(shape: ModelQuality, location_name: "ModelQuality")) CreateInferenceSchedulerResponse.struct_class = Types::CreateInferenceSchedulerResponse CreateLabelGroupRequest.add_member(:label_group_name, Shapes::ShapeRef.new(shape: LabelGroupName, required: true, location_name: "LabelGroupName")) @@ -511,6 +513,7 @@ module ClientApi DescribeModelResponse.add_member(:accumulated_inference_data_end_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "AccumulatedInferenceDataEndTime")) DescribeModelResponse.add_member(:retraining_scheduler_status, Shapes::ShapeRef.new(shape: RetrainingSchedulerStatus, location_name: "RetrainingSchedulerStatus")) DescribeModelResponse.add_member(:model_diagnostics_output_configuration, Shapes::ShapeRef.new(shape: ModelDiagnosticsOutputConfiguration, 
location_name: "ModelDiagnosticsOutputConfiguration")) + DescribeModelResponse.add_member(:model_quality, Shapes::ShapeRef.new(shape: ModelQuality, location_name: "ModelQuality")) DescribeModelResponse.struct_class = Types::DescribeModelResponse DescribeModelVersionRequest.add_member(:model_name, Shapes::ShapeRef.new(shape: ModelName, required: true, location_name: "ModelName")) @@ -551,6 +554,7 @@ module ClientApi DescribeModelVersionResponse.add_member(:auto_promotion_result_reason, Shapes::ShapeRef.new(shape: AutoPromotionResultReason, location_name: "AutoPromotionResultReason")) DescribeModelVersionResponse.add_member(:model_diagnostics_output_configuration, Shapes::ShapeRef.new(shape: ModelDiagnosticsOutputConfiguration, location_name: "ModelDiagnosticsOutputConfiguration")) DescribeModelVersionResponse.add_member(:model_diagnostics_results_object, Shapes::ShapeRef.new(shape: S3Object, location_name: "ModelDiagnosticsResultsObject")) + DescribeModelVersionResponse.add_member(:model_quality, Shapes::ShapeRef.new(shape: ModelQuality, location_name: "ModelQuality")) DescribeModelVersionResponse.struct_class = Types::DescribeModelVersionResponse DescribeResourcePolicyRequest.add_member(:resource_arn, Shapes::ShapeRef.new(shape: ResourceArn, required: true, location_name: "ResourceArn")) @@ -891,6 +895,7 @@ module ClientApi ModelSummary.add_member(:next_scheduled_retraining_start_date, Shapes::ShapeRef.new(shape: Timestamp, location_name: "NextScheduledRetrainingStartDate")) ModelSummary.add_member(:retraining_scheduler_status, Shapes::ShapeRef.new(shape: RetrainingSchedulerStatus, location_name: "RetrainingSchedulerStatus")) ModelSummary.add_member(:model_diagnostics_output_configuration, Shapes::ShapeRef.new(shape: ModelDiagnosticsOutputConfiguration, location_name: "ModelDiagnosticsOutputConfiguration")) + ModelSummary.add_member(:model_quality, Shapes::ShapeRef.new(shape: ModelQuality, location_name: "ModelQuality")) ModelSummary.struct_class = 
Types::ModelSummary ModelVersionSummaries.member = Shapes::ShapeRef.new(shape: ModelVersionSummary) @@ -902,6 +907,7 @@ module ClientApi ModelVersionSummary.add_member(:created_at, Shapes::ShapeRef.new(shape: Timestamp, location_name: "CreatedAt")) ModelVersionSummary.add_member(:status, Shapes::ShapeRef.new(shape: ModelVersionStatus, location_name: "Status")) ModelVersionSummary.add_member(:source_type, Shapes::ShapeRef.new(shape: ModelVersionSourceType, location_name: "SourceType")) + ModelVersionSummary.add_member(:model_quality, Shapes::ShapeRef.new(shape: ModelQuality, location_name: "ModelQuality")) ModelVersionSummary.struct_class = Types::ModelVersionSummary MonotonicValues.add_member(:status, Shapes::ShapeRef.new(shape: StatisticalIssueStatus, required: true, location_name: "Status")) diff --git a/gems/aws-sdk-lookoutequipment/lib/aws-sdk-lookoutequipment/types.rb b/gems/aws-sdk-lookoutequipment/lib/aws-sdk-lookoutequipment/types.rb index 89bf8f10c66..dc4e9f934ac 100644 --- a/gems/aws-sdk-lookoutequipment/lib/aws-sdk-lookoutequipment/types.rb +++ b/gems/aws-sdk-lookoutequipment/lib/aws-sdk-lookoutequipment/types.rb @@ -242,12 +242,36 @@ class CreateInferenceSchedulerRequest < Struct.new( # Indicates the status of the `CreateInferenceScheduler` operation. # @return [String] # + # @!attribute [rw] model_quality + # Provides a quality assessment for a model that uses labels. If + # Lookout for Equipment determines that the model quality is poor + # based on training metrics, the value is `POOR_QUALITY_DETECTED`. + # Otherwise, the value is `QUALITY_THRESHOLD_MET`. + # + # If the model is unlabeled, the model quality can't be assessed and + # the value of `ModelQuality` is `CANNOT_DETERMINE_QUALITY`. In this + # situation, you can get a model quality assessment by adding labels + # to the input dataset and retraining the model. + # + # For information about using labels with your models, see + # [Understanding labeling][1]. 
+ # + # For information about improving the quality of a model, see [Best + # practices with Amazon Lookout for Equipment][2]. + # + # + # + # [1]: https://docs.aws.amazon.com/lookout-for-equipment/latest/ug/understanding-labeling.html + # [2]: https://docs.aws.amazon.com/lookout-for-equipment/latest/ug/best-practices.html + # @return [String] + # # @see http://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/CreateInferenceSchedulerResponse AWS API Documentation # class CreateInferenceSchedulerResponse < Struct.new( :inference_scheduler_arn, :inference_scheduler_name, - :status) + :status, + :model_quality) SENSITIVE = [] include Aws::Structure end @@ -1525,6 +1549,29 @@ class DescribeModelRequest < Struct.new( # diagnostics. # @return [Types::ModelDiagnosticsOutputConfiguration] # + # @!attribute [rw] model_quality + # Provides a quality assessment for a model that uses labels. If + # Lookout for Equipment determines that the model quality is poor + # based on training metrics, the value is `POOR_QUALITY_DETECTED`. + # Otherwise, the value is `QUALITY_THRESHOLD_MET`. + # + # If the model is unlabeled, the model quality can't be assessed and + # the value of `ModelQuality` is `CANNOT_DETERMINE_QUALITY`. In this + # situation, you can get a model quality assessment by adding labels + # to the input dataset and retraining the model. + # + # For information about using labels with your models, see + # [Understanding labeling][1]. + # + # For information about improving the quality of a model, see [Best + # practices with Amazon Lookout for Equipment][2]. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/lookout-for-equipment/latest/ug/understanding-labeling.html + # [2]: https://docs.aws.amazon.com/lookout-for-equipment/latest/ug/best-practices.html + # @return [String] + # # @see http://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/DescribeModelResponse AWS API Documentation # class DescribeModelResponse < Struct.new( @@ -1568,7 +1615,8 @@ class DescribeModelResponse < Struct.new( :accumulated_inference_data_start_time, :accumulated_inference_data_end_time, :retraining_scheduler_status, - :model_diagnostics_output_configuration) + :model_diagnostics_output_configuration, + :model_quality) SENSITIVE = [] include Aws::Structure end @@ -1773,6 +1821,29 @@ class DescribeModelVersionRequest < Struct.new( # the pointwise model diagnostics for the model version. # @return [Types::S3Object] # + # @!attribute [rw] model_quality + # Provides a quality assessment for a model that uses labels. If + # Lookout for Equipment determines that the model quality is poor + # based on training metrics, the value is `POOR_QUALITY_DETECTED`. + # Otherwise, the value is `QUALITY_THRESHOLD_MET`. + # + # If the model is unlabeled, the model quality can't be assessed and + # the value of `ModelQuality` is `CANNOT_DETERMINE_QUALITY`. In this + # situation, you can get a model quality assessment by adding labels + # to the input dataset and retraining the model. + # + # For information about using labels with your models, see + # [Understanding labeling][1]. + # + # For information about improving the quality of a model, see [Best + # practices with Amazon Lookout for Equipment][2]. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/lookout-for-equipment/latest/ug/understanding-labeling.html + # [2]: https://docs.aws.amazon.com/lookout-for-equipment/latest/ug/best-practices.html + # @return [String] + # # @see http://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/DescribeModelVersionResponse AWS API Documentation # class DescribeModelVersionResponse < Struct.new( @@ -1809,7 +1880,8 @@ class DescribeModelVersionResponse < Struct.new( :auto_promotion_result, :auto_promotion_result_reason, :model_diagnostics_output_configuration, - :model_diagnostics_results_object) + :model_diagnostics_results_object, + :model_quality) SENSITIVE = [] include Aws::Structure end @@ -3536,6 +3608,29 @@ class ModelDiagnosticsS3OutputConfiguration < Struct.new( # for an Amazon Lookout for Equipment model. # @return [Types::ModelDiagnosticsOutputConfiguration] # + # @!attribute [rw] model_quality + # Provides a quality assessment for a model that uses labels. If + # Lookout for Equipment determines that the model quality is poor + # based on training metrics, the value is `POOR_QUALITY_DETECTED`. + # Otherwise, the value is `QUALITY_THRESHOLD_MET`. + # + # If the model is unlabeled, the model quality can't be assessed and + # the value of `ModelQuality` is `CANNOT_DETERMINE_QUALITY`. In this + # situation, you can get a model quality assessment by adding labels + # to the input dataset and retraining the model. + # + # For information about using labels with your models, see + # [Understanding labeling][1]. + # + # For information about improving the quality of a model, see [Best + # practices with Amazon Lookout for Equipment][2]. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/lookout-for-equipment/latest/ug/understanding-labeling.html + # [2]: https://docs.aws.amazon.com/lookout-for-equipment/latest/ug/best-practices.html + # @return [String] + # # @see http://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/ModelSummary AWS API Documentation # class ModelSummary < Struct.new( @@ -3552,7 +3647,8 @@ class ModelSummary < Struct.new( :latest_scheduled_retraining_start_time, :next_scheduled_retraining_start_date, :retraining_scheduler_status, - :model_diagnostics_output_configuration) + :model_diagnostics_output_configuration, + :model_quality) SENSITIVE = [] include Aws::Structure end @@ -3588,6 +3684,25 @@ class ModelSummary < Struct.new( # Indicates how this model version was generated. # @return [String] # + # @!attribute [rw] model_quality + # Provides a quality assessment for a model that uses labels. If + # Lookout for Equipment determines that the model quality is poor + # based on training metrics, the value is `POOR_QUALITY_DETECTED`. + # Otherwise, the value is `QUALITY_THRESHOLD_MET`. + # + # If the model is unlabeled, the model quality can't be assessed and + # the value of `ModelQuality` is `CANNOT_DETERMINE_QUALITY`. In this + # situation, you can get a model quality assessment by adding labels + # to the input dataset and retraining the model. + # + # For information about improving the quality of a model, see [Best + # practices with Amazon Lookout for Equipment][1]. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/lookout-for-equipment/latest/ug/best-practices.html + # @return [String] + # # @see http://docs.aws.amazon.com/goto/WebAPI/lookoutequipment-2020-12-15/ModelVersionSummary AWS API Documentation # class ModelVersionSummary < Struct.new( @@ -3597,7 +3712,8 @@ class ModelVersionSummary < Struct.new( :model_version_arn, :created_at, :status, - :source_type) + :source_type, + :model_quality) SENSITIVE = [] include Aws::Structure end diff --git a/gems/aws-sdk-lookoutequipment/sig/client.rbs b/gems/aws-sdk-lookoutequipment/sig/client.rbs index 6d4546dfc4d..b50a2ade900 100644 --- a/gems/aws-sdk-lookoutequipment/sig/client.rbs +++ b/gems/aws-sdk-lookoutequipment/sig/client.rbs @@ -101,6 +101,7 @@ module Aws def inference_scheduler_arn: () -> ::String def inference_scheduler_name: () -> ::String def status: () -> ("PENDING" | "RUNNING" | "STOPPING" | "STOPPED") + def model_quality: () -> ("QUALITY_THRESHOLD_MET" | "CANNOT_DETERMINE_QUALITY" | "POOR_QUALITY_DETECTED") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/LookoutEquipment/Client.html#create_inference_scheduler-instance_method def create_inference_scheduler: ( @@ -426,6 +427,7 @@ module Aws def accumulated_inference_data_end_time: () -> ::Time def retraining_scheduler_status: () -> ("PENDING" | "RUNNING" | "STOPPING" | "STOPPED") def model_diagnostics_output_configuration: () -> Types::ModelDiagnosticsOutputConfiguration + def model_quality: () -> ("QUALITY_THRESHOLD_MET" | "CANNOT_DETERMINE_QUALITY" | "POOR_QUALITY_DETECTED") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/LookoutEquipment/Client.html#describe_model-instance_method def describe_model: ( @@ -469,6 +471,7 @@ module Aws def auto_promotion_result_reason: () -> ::String def model_diagnostics_output_configuration: () -> Types::ModelDiagnosticsOutputConfiguration def model_diagnostics_results_object: () -> Types::S3Object + def model_quality: () -> ("QUALITY_THRESHOLD_MET" | 
"CANNOT_DETERMINE_QUALITY" | "POOR_QUALITY_DETECTED") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/LookoutEquipment/Client.html#describe_model_version-instance_method def describe_model_version: ( diff --git a/gems/aws-sdk-lookoutequipment/sig/types.rbs b/gems/aws-sdk-lookoutequipment/sig/types.rbs index d6415c1d4a3..1629d0055ea 100644 --- a/gems/aws-sdk-lookoutequipment/sig/types.rbs +++ b/gems/aws-sdk-lookoutequipment/sig/types.rbs @@ -64,6 +64,7 @@ module Aws::LookoutEquipment attr_accessor inference_scheduler_arn: ::String attr_accessor inference_scheduler_name: ::String attr_accessor status: ("PENDING" | "RUNNING" | "STOPPING" | "STOPPED") + attr_accessor model_quality: ("QUALITY_THRESHOLD_MET" | "CANNOT_DETERMINE_QUALITY" | "POOR_QUALITY_DETECTED") SENSITIVE: [] end @@ -362,6 +363,7 @@ module Aws::LookoutEquipment attr_accessor accumulated_inference_data_end_time: ::Time attr_accessor retraining_scheduler_status: ("PENDING" | "RUNNING" | "STOPPING" | "STOPPED") attr_accessor model_diagnostics_output_configuration: Types::ModelDiagnosticsOutputConfiguration + attr_accessor model_quality: ("QUALITY_THRESHOLD_MET" | "CANNOT_DETERMINE_QUALITY" | "POOR_QUALITY_DETECTED") SENSITIVE: [] end @@ -406,6 +408,7 @@ module Aws::LookoutEquipment attr_accessor auto_promotion_result_reason: ::String attr_accessor model_diagnostics_output_configuration: Types::ModelDiagnosticsOutputConfiguration attr_accessor model_diagnostics_results_object: Types::S3Object + attr_accessor model_quality: ("QUALITY_THRESHOLD_MET" | "CANNOT_DETERMINE_QUALITY" | "POOR_QUALITY_DETECTED") SENSITIVE: [] end @@ -844,6 +847,7 @@ module Aws::LookoutEquipment attr_accessor next_scheduled_retraining_start_date: ::Time attr_accessor retraining_scheduler_status: ("PENDING" | "RUNNING" | "STOPPING" | "STOPPED") attr_accessor model_diagnostics_output_configuration: Types::ModelDiagnosticsOutputConfiguration + attr_accessor model_quality: ("QUALITY_THRESHOLD_MET" | "CANNOT_DETERMINE_QUALITY" | 
"POOR_QUALITY_DETECTED") SENSITIVE: [] end @@ -855,6 +859,7 @@ module Aws::LookoutEquipment attr_accessor created_at: ::Time attr_accessor status: ("IN_PROGRESS" | "SUCCESS" | "FAILED" | "IMPORT_IN_PROGRESS" | "CANCELED") attr_accessor source_type: ("TRAINING" | "RETRAINING" | "IMPORT") + attr_accessor model_quality: ("QUALITY_THRESHOLD_MET" | "CANNOT_DETERMINE_QUALITY" | "POOR_QUALITY_DETECTED") SENSITIVE: [] end diff --git a/gems/aws-sdk-medialive/CHANGELOG.md b/gems/aws-sdk-medialive/CHANGELOG.md index dd1f057429b..126e7b50f16 100644 --- a/gems/aws-sdk-medialive/CHANGELOG.md +++ b/gems/aws-sdk-medialive/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.116.0 (2024-02-21) +------------------ + +* Feature - MediaLive now supports the ability to restart pipelines in a running channel. + 1.115.0 (2024-01-26) ------------------ diff --git a/gems/aws-sdk-medialive/VERSION b/gems/aws-sdk-medialive/VERSION index 97ee99fccc8..10c8812fe5e 100644 --- a/gems/aws-sdk-medialive/VERSION +++ b/gems/aws-sdk-medialive/VERSION @@ -1 +1 @@ -1.115.0 +1.116.0 diff --git a/gems/aws-sdk-medialive/lib/aws-sdk-medialive.rb b/gems/aws-sdk-medialive/lib/aws-sdk-medialive.rb index 2683f6ebb8c..46326b8700a 100644 --- a/gems/aws-sdk-medialive/lib/aws-sdk-medialive.rb +++ b/gems/aws-sdk-medialive/lib/aws-sdk-medialive.rb @@ -53,6 +53,6 @@ # @!group service module Aws::MediaLive - GEM_VERSION = '1.115.0' + GEM_VERSION = '1.116.0' end diff --git a/gems/aws-sdk-medialive/lib/aws-sdk-medialive/client.rb b/gems/aws-sdk-medialive/lib/aws-sdk-medialive/client.rb index cb03d9e8776..ca1fe3bcfb5 100644 --- a/gems/aws-sdk-medialive/lib/aws-sdk-medialive/client.rb +++ b/gems/aws-sdk-medialive/lib/aws-sdk-medialive/client.rb @@ -3918,6 +3918,7 @@ def describe_input_device(params = {}, options = {}) # @option params [required, String] :input_device_id # # @option params [required, String] :accept + # The HTTP Accept header. Indicates the requested type fothe thumbnail. 
# # @return [Types::DescribeInputDeviceThumbnailResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # @@ -8705,6 +8706,717 @@ def update_reservation(params = {}, options = {}) req.send_request(options) end + # Restart pipelines in one channel that is currently running. + # + # @option params [required, String] :channel_id + # + # @option params [Array] :pipeline_ids + # An array of pipelines to restart in this channel. Format PIPELINE\_0 + # or PIPELINE\_1. + # + # @return [Types::RestartChannelPipelinesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::RestartChannelPipelinesResponse#arn #arn} => String + # * {Types::RestartChannelPipelinesResponse#cdi_input_specification #cdi_input_specification} => Types::CdiInputSpecification + # * {Types::RestartChannelPipelinesResponse#channel_class #channel_class} => String + # * {Types::RestartChannelPipelinesResponse#destinations #destinations} => Array<Types::OutputDestination> + # * {Types::RestartChannelPipelinesResponse#egress_endpoints #egress_endpoints} => Array<Types::ChannelEgressEndpoint> + # * {Types::RestartChannelPipelinesResponse#encoder_settings #encoder_settings} => Types::EncoderSettings + # * {Types::RestartChannelPipelinesResponse#id #id} => String + # * {Types::RestartChannelPipelinesResponse#input_attachments #input_attachments} => Array<Types::InputAttachment> + # * {Types::RestartChannelPipelinesResponse#input_specification #input_specification} => Types::InputSpecification + # * {Types::RestartChannelPipelinesResponse#log_level #log_level} => String + # * {Types::RestartChannelPipelinesResponse#maintenance #maintenance} => Types::MaintenanceStatus + # * {Types::RestartChannelPipelinesResponse#maintenance_status #maintenance_status} => String + # * {Types::RestartChannelPipelinesResponse#name #name} => String + # * {Types::RestartChannelPipelinesResponse#pipeline_details 
#pipeline_details} => Array<Types::PipelineDetail> + # * {Types::RestartChannelPipelinesResponse#pipelines_running_count #pipelines_running_count} => Integer + # * {Types::RestartChannelPipelinesResponse#role_arn #role_arn} => String + # * {Types::RestartChannelPipelinesResponse#state #state} => String + # * {Types::RestartChannelPipelinesResponse#tags #tags} => Hash<String,String> + # * {Types::RestartChannelPipelinesResponse#vpc #vpc} => Types::VpcOutputSettingsDescription + # + # @example Request syntax with placeholder values + # + # resp = client.restart_channel_pipelines({ + # channel_id: "__string", # required + # pipeline_ids: ["PIPELINE_0"], # accepts PIPELINE_0, PIPELINE_1 + # }) + # + # @example Response structure + # + # resp.arn #=> String + # resp.cdi_input_specification.resolution #=> String, one of "SD", "HD", "FHD", "UHD" + # resp.channel_class #=> String, one of "STANDARD", "SINGLE_PIPELINE" + # resp.destinations #=> Array + # resp.destinations[0].id #=> String + # resp.destinations[0].media_package_settings #=> Array + # resp.destinations[0].media_package_settings[0].channel_id #=> String + # resp.destinations[0].multiplex_settings.multiplex_id #=> String + # resp.destinations[0].multiplex_settings.program_name #=> String + # resp.destinations[0].settings #=> Array + # resp.destinations[0].settings[0].password_param #=> String + # resp.destinations[0].settings[0].stream_name #=> String + # resp.destinations[0].settings[0].url #=> String + # resp.destinations[0].settings[0].username #=> String + # resp.egress_endpoints #=> Array + # resp.egress_endpoints[0].source_ip #=> String + # resp.encoder_settings.audio_descriptions #=> Array + # resp.encoder_settings.audio_descriptions[0].audio_normalization_settings.algorithm #=> String, one of "ITU_1770_1", "ITU_1770_2" + # resp.encoder_settings.audio_descriptions[0].audio_normalization_settings.algorithm_control #=> String, one of "CORRECT_AUDIO" + # 
resp.encoder_settings.audio_descriptions[0].audio_normalization_settings.target_lkfs #=> Float + # resp.encoder_settings.audio_descriptions[0].audio_selector_name #=> String + # resp.encoder_settings.audio_descriptions[0].audio_type #=> String, one of "CLEAN_EFFECTS", "HEARING_IMPAIRED", "UNDEFINED", "VISUAL_IMPAIRED_COMMENTARY" + # resp.encoder_settings.audio_descriptions[0].audio_type_control #=> String, one of "FOLLOW_INPUT", "USE_CONFIGURED" + # resp.encoder_settings.audio_descriptions[0].audio_watermarking_settings.nielsen_watermarks_settings.nielsen_cbet_settings.cbet_check_digit_string #=> String + # resp.encoder_settings.audio_descriptions[0].audio_watermarking_settings.nielsen_watermarks_settings.nielsen_cbet_settings.cbet_stepaside #=> String, one of "DISABLED", "ENABLED" + # resp.encoder_settings.audio_descriptions[0].audio_watermarking_settings.nielsen_watermarks_settings.nielsen_cbet_settings.csid #=> String + # resp.encoder_settings.audio_descriptions[0].audio_watermarking_settings.nielsen_watermarks_settings.nielsen_distribution_type #=> String, one of "FINAL_DISTRIBUTOR", "PROGRAM_CONTENT" + # resp.encoder_settings.audio_descriptions[0].audio_watermarking_settings.nielsen_watermarks_settings.nielsen_naes_ii_nw_settings.check_digit_string #=> String + # resp.encoder_settings.audio_descriptions[0].audio_watermarking_settings.nielsen_watermarks_settings.nielsen_naes_ii_nw_settings.sid #=> Float + # resp.encoder_settings.audio_descriptions[0].audio_watermarking_settings.nielsen_watermarks_settings.nielsen_naes_ii_nw_settings.timezone #=> String, one of "AMERICA_PUERTO_RICO", "US_ALASKA", "US_ARIZONA", "US_CENTRAL", "US_EASTERN", "US_HAWAII", "US_MOUNTAIN", "US_PACIFIC", "US_SAMOA", "UTC" + # resp.encoder_settings.audio_descriptions[0].codec_settings.aac_settings.bitrate #=> Float + # resp.encoder_settings.audio_descriptions[0].codec_settings.aac_settings.coding_mode #=> String, one of "AD_RECEIVER_MIX", "CODING_MODE_1_0", "CODING_MODE_1_1", 
"CODING_MODE_2_0", "CODING_MODE_5_1" + # resp.encoder_settings.audio_descriptions[0].codec_settings.aac_settings.input_type #=> String, one of "BROADCASTER_MIXED_AD", "NORMAL" + # resp.encoder_settings.audio_descriptions[0].codec_settings.aac_settings.profile #=> String, one of "HEV1", "HEV2", "LC" + # resp.encoder_settings.audio_descriptions[0].codec_settings.aac_settings.rate_control_mode #=> String, one of "CBR", "VBR" + # resp.encoder_settings.audio_descriptions[0].codec_settings.aac_settings.raw_format #=> String, one of "LATM_LOAS", "NONE" + # resp.encoder_settings.audio_descriptions[0].codec_settings.aac_settings.sample_rate #=> Float + # resp.encoder_settings.audio_descriptions[0].codec_settings.aac_settings.spec #=> String, one of "MPEG2", "MPEG4" + # resp.encoder_settings.audio_descriptions[0].codec_settings.aac_settings.vbr_quality #=> String, one of "HIGH", "LOW", "MEDIUM_HIGH", "MEDIUM_LOW" + # resp.encoder_settings.audio_descriptions[0].codec_settings.ac_3_settings.bitrate #=> Float + # resp.encoder_settings.audio_descriptions[0].codec_settings.ac_3_settings.bitstream_mode #=> String, one of "COMMENTARY", "COMPLETE_MAIN", "DIALOGUE", "EMERGENCY", "HEARING_IMPAIRED", "MUSIC_AND_EFFECTS", "VISUALLY_IMPAIRED", "VOICE_OVER" + # resp.encoder_settings.audio_descriptions[0].codec_settings.ac_3_settings.coding_mode #=> String, one of "CODING_MODE_1_0", "CODING_MODE_1_1", "CODING_MODE_2_0", "CODING_MODE_3_2_LFE" + # resp.encoder_settings.audio_descriptions[0].codec_settings.ac_3_settings.dialnorm #=> Integer + # resp.encoder_settings.audio_descriptions[0].codec_settings.ac_3_settings.drc_profile #=> String, one of "FILM_STANDARD", "NONE" + # resp.encoder_settings.audio_descriptions[0].codec_settings.ac_3_settings.lfe_filter #=> String, one of "DISABLED", "ENABLED" + # resp.encoder_settings.audio_descriptions[0].codec_settings.ac_3_settings.metadata_control #=> String, one of "FOLLOW_INPUT", "USE_CONFIGURED" + # 
resp.encoder_settings.audio_descriptions[0].codec_settings.ac_3_settings.attenuation_control #=> String, one of "ATTENUATE_3_DB", "NONE" + # resp.encoder_settings.audio_descriptions[0].codec_settings.eac_3_atmos_settings.bitrate #=> Float + # resp.encoder_settings.audio_descriptions[0].codec_settings.eac_3_atmos_settings.coding_mode #=> String, one of "CODING_MODE_5_1_4", "CODING_MODE_7_1_4", "CODING_MODE_9_1_6" + # resp.encoder_settings.audio_descriptions[0].codec_settings.eac_3_atmos_settings.dialnorm #=> Integer + # resp.encoder_settings.audio_descriptions[0].codec_settings.eac_3_atmos_settings.drc_line #=> String, one of "FILM_LIGHT", "FILM_STANDARD", "MUSIC_LIGHT", "MUSIC_STANDARD", "NONE", "SPEECH" + # resp.encoder_settings.audio_descriptions[0].codec_settings.eac_3_atmos_settings.drc_rf #=> String, one of "FILM_LIGHT", "FILM_STANDARD", "MUSIC_LIGHT", "MUSIC_STANDARD", "NONE", "SPEECH" + # resp.encoder_settings.audio_descriptions[0].codec_settings.eac_3_atmos_settings.height_trim #=> Float + # resp.encoder_settings.audio_descriptions[0].codec_settings.eac_3_atmos_settings.surround_trim #=> Float + # resp.encoder_settings.audio_descriptions[0].codec_settings.eac_3_settings.attenuation_control #=> String, one of "ATTENUATE_3_DB", "NONE" + # resp.encoder_settings.audio_descriptions[0].codec_settings.eac_3_settings.bitrate #=> Float + # resp.encoder_settings.audio_descriptions[0].codec_settings.eac_3_settings.bitstream_mode #=> String, one of "COMMENTARY", "COMPLETE_MAIN", "EMERGENCY", "HEARING_IMPAIRED", "VISUALLY_IMPAIRED" + # resp.encoder_settings.audio_descriptions[0].codec_settings.eac_3_settings.coding_mode #=> String, one of "CODING_MODE_1_0", "CODING_MODE_2_0", "CODING_MODE_3_2" + # resp.encoder_settings.audio_descriptions[0].codec_settings.eac_3_settings.dc_filter #=> String, one of "DISABLED", "ENABLED" + # resp.encoder_settings.audio_descriptions[0].codec_settings.eac_3_settings.dialnorm #=> Integer + # 
resp.encoder_settings.audio_descriptions[0].codec_settings.eac_3_settings.drc_line #=> String, one of "FILM_LIGHT", "FILM_STANDARD", "MUSIC_LIGHT", "MUSIC_STANDARD", "NONE", "SPEECH" + # resp.encoder_settings.audio_descriptions[0].codec_settings.eac_3_settings.drc_rf #=> String, one of "FILM_LIGHT", "FILM_STANDARD", "MUSIC_LIGHT", "MUSIC_STANDARD", "NONE", "SPEECH" + # resp.encoder_settings.audio_descriptions[0].codec_settings.eac_3_settings.lfe_control #=> String, one of "LFE", "NO_LFE" + # resp.encoder_settings.audio_descriptions[0].codec_settings.eac_3_settings.lfe_filter #=> String, one of "DISABLED", "ENABLED" + # resp.encoder_settings.audio_descriptions[0].codec_settings.eac_3_settings.lo_ro_center_mix_level #=> Float + # resp.encoder_settings.audio_descriptions[0].codec_settings.eac_3_settings.lo_ro_surround_mix_level #=> Float + # resp.encoder_settings.audio_descriptions[0].codec_settings.eac_3_settings.lt_rt_center_mix_level #=> Float + # resp.encoder_settings.audio_descriptions[0].codec_settings.eac_3_settings.lt_rt_surround_mix_level #=> Float + # resp.encoder_settings.audio_descriptions[0].codec_settings.eac_3_settings.metadata_control #=> String, one of "FOLLOW_INPUT", "USE_CONFIGURED" + # resp.encoder_settings.audio_descriptions[0].codec_settings.eac_3_settings.passthrough_control #=> String, one of "NO_PASSTHROUGH", "WHEN_POSSIBLE" + # resp.encoder_settings.audio_descriptions[0].codec_settings.eac_3_settings.phase_control #=> String, one of "NO_SHIFT", "SHIFT_90_DEGREES" + # resp.encoder_settings.audio_descriptions[0].codec_settings.eac_3_settings.stereo_downmix #=> String, one of "DPL2", "LO_RO", "LT_RT", "NOT_INDICATED" + # resp.encoder_settings.audio_descriptions[0].codec_settings.eac_3_settings.surround_ex_mode #=> String, one of "DISABLED", "ENABLED", "NOT_INDICATED" + # resp.encoder_settings.audio_descriptions[0].codec_settings.eac_3_settings.surround_mode #=> String, one of "DISABLED", "ENABLED", "NOT_INDICATED" + # 
resp.encoder_settings.audio_descriptions[0].codec_settings.mp_2_settings.bitrate #=> Float + # resp.encoder_settings.audio_descriptions[0].codec_settings.mp_2_settings.coding_mode #=> String, one of "CODING_MODE_1_0", "CODING_MODE_2_0" + # resp.encoder_settings.audio_descriptions[0].codec_settings.mp_2_settings.sample_rate #=> Float + # resp.encoder_settings.audio_descriptions[0].codec_settings.wav_settings.bit_depth #=> Float + # resp.encoder_settings.audio_descriptions[0].codec_settings.wav_settings.coding_mode #=> String, one of "CODING_MODE_1_0", "CODING_MODE_2_0", "CODING_MODE_4_0", "CODING_MODE_8_0" + # resp.encoder_settings.audio_descriptions[0].codec_settings.wav_settings.sample_rate #=> Float + # resp.encoder_settings.audio_descriptions[0].language_code #=> String + # resp.encoder_settings.audio_descriptions[0].language_code_control #=> String, one of "FOLLOW_INPUT", "USE_CONFIGURED" + # resp.encoder_settings.audio_descriptions[0].name #=> String + # resp.encoder_settings.audio_descriptions[0].remix_settings.channel_mappings #=> Array + # resp.encoder_settings.audio_descriptions[0].remix_settings.channel_mappings[0].input_channel_levels #=> Array + # resp.encoder_settings.audio_descriptions[0].remix_settings.channel_mappings[0].input_channel_levels[0].gain #=> Integer + # resp.encoder_settings.audio_descriptions[0].remix_settings.channel_mappings[0].input_channel_levels[0].input_channel #=> Integer + # resp.encoder_settings.audio_descriptions[0].remix_settings.channel_mappings[0].output_channel #=> Integer + # resp.encoder_settings.audio_descriptions[0].remix_settings.channels_in #=> Integer + # resp.encoder_settings.audio_descriptions[0].remix_settings.channels_out #=> Integer + # resp.encoder_settings.audio_descriptions[0].stream_name #=> String + # resp.encoder_settings.avail_blanking.avail_blanking_image.password_param #=> String + # resp.encoder_settings.avail_blanking.avail_blanking_image.uri #=> String + # 
resp.encoder_settings.avail_blanking.avail_blanking_image.username #=> String + # resp.encoder_settings.avail_blanking.state #=> String, one of "DISABLED", "ENABLED" + # resp.encoder_settings.avail_configuration.avail_settings.esam.acquisition_point_id #=> String + # resp.encoder_settings.avail_configuration.avail_settings.esam.ad_avail_offset #=> Integer + # resp.encoder_settings.avail_configuration.avail_settings.esam.password_param #=> String + # resp.encoder_settings.avail_configuration.avail_settings.esam.pois_endpoint #=> String + # resp.encoder_settings.avail_configuration.avail_settings.esam.username #=> String + # resp.encoder_settings.avail_configuration.avail_settings.esam.zone_identity #=> String + # resp.encoder_settings.avail_configuration.avail_settings.scte_35_splice_insert.ad_avail_offset #=> Integer + # resp.encoder_settings.avail_configuration.avail_settings.scte_35_splice_insert.no_regional_blackout_flag #=> String, one of "FOLLOW", "IGNORE" + # resp.encoder_settings.avail_configuration.avail_settings.scte_35_splice_insert.web_delivery_allowed_flag #=> String, one of "FOLLOW", "IGNORE" + # resp.encoder_settings.avail_configuration.avail_settings.scte_35_time_signal_apos.ad_avail_offset #=> Integer + # resp.encoder_settings.avail_configuration.avail_settings.scte_35_time_signal_apos.no_regional_blackout_flag #=> String, one of "FOLLOW", "IGNORE" + # resp.encoder_settings.avail_configuration.avail_settings.scte_35_time_signal_apos.web_delivery_allowed_flag #=> String, one of "FOLLOW", "IGNORE" + # resp.encoder_settings.blackout_slate.blackout_slate_image.password_param #=> String + # resp.encoder_settings.blackout_slate.blackout_slate_image.uri #=> String + # resp.encoder_settings.blackout_slate.blackout_slate_image.username #=> String + # resp.encoder_settings.blackout_slate.network_end_blackout #=> String, one of "DISABLED", "ENABLED" + # resp.encoder_settings.blackout_slate.network_end_blackout_image.password_param #=> String + # 
resp.encoder_settings.blackout_slate.network_end_blackout_image.uri #=> String + # resp.encoder_settings.blackout_slate.network_end_blackout_image.username #=> String + # resp.encoder_settings.blackout_slate.network_id #=> String + # resp.encoder_settings.blackout_slate.state #=> String, one of "DISABLED", "ENABLED" + # resp.encoder_settings.caption_descriptions #=> Array + # resp.encoder_settings.caption_descriptions[0].accessibility #=> String, one of "DOES_NOT_IMPLEMENT_ACCESSIBILITY_FEATURES", "IMPLEMENTS_ACCESSIBILITY_FEATURES" + # resp.encoder_settings.caption_descriptions[0].caption_selector_name #=> String + # resp.encoder_settings.caption_descriptions[0].destination_settings.burn_in_destination_settings.alignment #=> String, one of "CENTERED", "LEFT", "SMART" + # resp.encoder_settings.caption_descriptions[0].destination_settings.burn_in_destination_settings.background_color #=> String, one of "BLACK", "NONE", "WHITE" + # resp.encoder_settings.caption_descriptions[0].destination_settings.burn_in_destination_settings.background_opacity #=> Integer + # resp.encoder_settings.caption_descriptions[0].destination_settings.burn_in_destination_settings.font.password_param #=> String + # resp.encoder_settings.caption_descriptions[0].destination_settings.burn_in_destination_settings.font.uri #=> String + # resp.encoder_settings.caption_descriptions[0].destination_settings.burn_in_destination_settings.font.username #=> String + # resp.encoder_settings.caption_descriptions[0].destination_settings.burn_in_destination_settings.font_color #=> String, one of "BLACK", "BLUE", "GREEN", "RED", "WHITE", "YELLOW" + # resp.encoder_settings.caption_descriptions[0].destination_settings.burn_in_destination_settings.font_opacity #=> Integer + # resp.encoder_settings.caption_descriptions[0].destination_settings.burn_in_destination_settings.font_resolution #=> Integer + # resp.encoder_settings.caption_descriptions[0].destination_settings.burn_in_destination_settings.font_size #=> 
String + # resp.encoder_settings.caption_descriptions[0].destination_settings.burn_in_destination_settings.outline_color #=> String, one of "BLACK", "BLUE", "GREEN", "RED", "WHITE", "YELLOW" + # resp.encoder_settings.caption_descriptions[0].destination_settings.burn_in_destination_settings.outline_size #=> Integer + # resp.encoder_settings.caption_descriptions[0].destination_settings.burn_in_destination_settings.shadow_color #=> String, one of "BLACK", "NONE", "WHITE" + # resp.encoder_settings.caption_descriptions[0].destination_settings.burn_in_destination_settings.shadow_opacity #=> Integer + # resp.encoder_settings.caption_descriptions[0].destination_settings.burn_in_destination_settings.shadow_x_offset #=> Integer + # resp.encoder_settings.caption_descriptions[0].destination_settings.burn_in_destination_settings.shadow_y_offset #=> Integer + # resp.encoder_settings.caption_descriptions[0].destination_settings.burn_in_destination_settings.teletext_grid_control #=> String, one of "FIXED", "SCALED" + # resp.encoder_settings.caption_descriptions[0].destination_settings.burn_in_destination_settings.x_position #=> Integer + # resp.encoder_settings.caption_descriptions[0].destination_settings.burn_in_destination_settings.y_position #=> Integer + # resp.encoder_settings.caption_descriptions[0].destination_settings.dvb_sub_destination_settings.alignment #=> String, one of "CENTERED", "LEFT", "SMART" + # resp.encoder_settings.caption_descriptions[0].destination_settings.dvb_sub_destination_settings.background_color #=> String, one of "BLACK", "NONE", "WHITE" + # resp.encoder_settings.caption_descriptions[0].destination_settings.dvb_sub_destination_settings.background_opacity #=> Integer + # resp.encoder_settings.caption_descriptions[0].destination_settings.dvb_sub_destination_settings.font.password_param #=> String + # resp.encoder_settings.caption_descriptions[0].destination_settings.dvb_sub_destination_settings.font.uri #=> String + # 
resp.encoder_settings.caption_descriptions[0].destination_settings.dvb_sub_destination_settings.font.username #=> String + # resp.encoder_settings.caption_descriptions[0].destination_settings.dvb_sub_destination_settings.font_color #=> String, one of "BLACK", "BLUE", "GREEN", "RED", "WHITE", "YELLOW" + # resp.encoder_settings.caption_descriptions[0].destination_settings.dvb_sub_destination_settings.font_opacity #=> Integer + # resp.encoder_settings.caption_descriptions[0].destination_settings.dvb_sub_destination_settings.font_resolution #=> Integer + # resp.encoder_settings.caption_descriptions[0].destination_settings.dvb_sub_destination_settings.font_size #=> String + # resp.encoder_settings.caption_descriptions[0].destination_settings.dvb_sub_destination_settings.outline_color #=> String, one of "BLACK", "BLUE", "GREEN", "RED", "WHITE", "YELLOW" + # resp.encoder_settings.caption_descriptions[0].destination_settings.dvb_sub_destination_settings.outline_size #=> Integer + # resp.encoder_settings.caption_descriptions[0].destination_settings.dvb_sub_destination_settings.shadow_color #=> String, one of "BLACK", "NONE", "WHITE" + # resp.encoder_settings.caption_descriptions[0].destination_settings.dvb_sub_destination_settings.shadow_opacity #=> Integer + # resp.encoder_settings.caption_descriptions[0].destination_settings.dvb_sub_destination_settings.shadow_x_offset #=> Integer + # resp.encoder_settings.caption_descriptions[0].destination_settings.dvb_sub_destination_settings.shadow_y_offset #=> Integer + # resp.encoder_settings.caption_descriptions[0].destination_settings.dvb_sub_destination_settings.teletext_grid_control #=> String, one of "FIXED", "SCALED" + # resp.encoder_settings.caption_descriptions[0].destination_settings.dvb_sub_destination_settings.x_position #=> Integer + # resp.encoder_settings.caption_descriptions[0].destination_settings.dvb_sub_destination_settings.y_position #=> Integer + # 
resp.encoder_settings.caption_descriptions[0].destination_settings.ebu_tt_d_destination_settings.copyright_holder #=> String + # resp.encoder_settings.caption_descriptions[0].destination_settings.ebu_tt_d_destination_settings.fill_line_gap #=> String, one of "DISABLED", "ENABLED" + # resp.encoder_settings.caption_descriptions[0].destination_settings.ebu_tt_d_destination_settings.font_family #=> String + # resp.encoder_settings.caption_descriptions[0].destination_settings.ebu_tt_d_destination_settings.style_control #=> String, one of "EXCLUDE", "INCLUDE" + # resp.encoder_settings.caption_descriptions[0].destination_settings.ttml_destination_settings.style_control #=> String, one of "PASSTHROUGH", "USE_CONFIGURED" + # resp.encoder_settings.caption_descriptions[0].destination_settings.webvtt_destination_settings.style_control #=> String, one of "NO_STYLE_DATA", "PASSTHROUGH" + # resp.encoder_settings.caption_descriptions[0].language_code #=> String + # resp.encoder_settings.caption_descriptions[0].language_description #=> String + # resp.encoder_settings.caption_descriptions[0].name #=> String + # resp.encoder_settings.feature_activations.input_prepare_schedule_actions #=> String, one of "DISABLED", "ENABLED" + # resp.encoder_settings.feature_activations.output_static_image_overlay_schedule_actions #=> String, one of "DISABLED", "ENABLED" + # resp.encoder_settings.global_configuration.initial_audio_gain #=> Integer + # resp.encoder_settings.global_configuration.input_end_action #=> String, one of "NONE", "SWITCH_AND_LOOP_INPUTS" + # resp.encoder_settings.global_configuration.input_loss_behavior.black_frame_msec #=> Integer + # resp.encoder_settings.global_configuration.input_loss_behavior.input_loss_image_color #=> String + # resp.encoder_settings.global_configuration.input_loss_behavior.input_loss_image_slate.password_param #=> String + # resp.encoder_settings.global_configuration.input_loss_behavior.input_loss_image_slate.uri #=> String + # 
resp.encoder_settings.global_configuration.input_loss_behavior.input_loss_image_slate.username #=> String + # resp.encoder_settings.global_configuration.input_loss_behavior.input_loss_image_type #=> String, one of "COLOR", "SLATE" + # resp.encoder_settings.global_configuration.input_loss_behavior.repeat_frame_msec #=> Integer + # resp.encoder_settings.global_configuration.output_locking_mode #=> String, one of "EPOCH_LOCKING", "PIPELINE_LOCKING" + # resp.encoder_settings.global_configuration.output_timing_source #=> String, one of "INPUT_CLOCK", "SYSTEM_CLOCK" + # resp.encoder_settings.global_configuration.support_low_framerate_inputs #=> String, one of "DISABLED", "ENABLED" + # resp.encoder_settings.global_configuration.output_locking_settings.epoch_locking_settings.custom_epoch #=> String + # resp.encoder_settings.global_configuration.output_locking_settings.epoch_locking_settings.jam_sync_time #=> String + # resp.encoder_settings.motion_graphics_configuration.motion_graphics_insertion #=> String, one of "DISABLED", "ENABLED" + # resp.encoder_settings.nielsen_configuration.distributor_id #=> String + # resp.encoder_settings.nielsen_configuration.nielsen_pcm_to_id_3_tagging #=> String, one of "DISABLED", "ENABLED" + # resp.encoder_settings.output_groups #=> Array + # resp.encoder_settings.output_groups[0].name #=> String + # resp.encoder_settings.output_groups[0].output_group_settings.archive_group_settings.archive_cdn_settings.archive_s3_settings.canned_acl #=> String, one of "AUTHENTICATED_READ", "BUCKET_OWNER_FULL_CONTROL", "BUCKET_OWNER_READ", "PUBLIC_READ" + # resp.encoder_settings.output_groups[0].output_group_settings.archive_group_settings.destination.destination_ref_id #=> String + # resp.encoder_settings.output_groups[0].output_group_settings.archive_group_settings.rollover_interval #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.frame_capture_group_settings.destination.destination_ref_id #=> String + # 
resp.encoder_settings.output_groups[0].output_group_settings.frame_capture_group_settings.frame_capture_cdn_settings.frame_capture_s3_settings.canned_acl #=> String, one of "AUTHENTICATED_READ", "BUCKET_OWNER_FULL_CONTROL", "BUCKET_OWNER_READ", "PUBLIC_READ" + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.ad_markers #=> Array + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.ad_markers[0] #=> String, one of "ADOBE", "ELEMENTAL", "ELEMENTAL_SCTE35" + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.base_url_content #=> String + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.base_url_content_1 #=> String + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.base_url_manifest #=> String + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.base_url_manifest_1 #=> String + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.caption_language_mappings #=> Array + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.caption_language_mappings[0].caption_channel #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.caption_language_mappings[0].language_code #=> String + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.caption_language_mappings[0].language_description #=> String + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.caption_language_setting #=> String, one of "INSERT", "NONE", "OMIT" + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.client_cache #=> String, one of "DISABLED", "ENABLED" + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.codec_specification #=> String, one of "RFC_4281", "RFC_6381" + # 
resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.constant_iv #=> String + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.destination.destination_ref_id #=> String + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.directory_structure #=> String, one of "SINGLE_DIRECTORY", "SUBDIRECTORY_PER_STREAM" + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.discontinuity_tags #=> String, one of "INSERT", "NEVER_INSERT" + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.encryption_type #=> String, one of "AES128", "SAMPLE_AES" + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.hls_cdn_settings.hls_akamai_settings.connection_retry_interval #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.hls_cdn_settings.hls_akamai_settings.filecache_duration #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.hls_cdn_settings.hls_akamai_settings.http_transfer_mode #=> String, one of "CHUNKED", "NON_CHUNKED" + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.hls_cdn_settings.hls_akamai_settings.num_retries #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.hls_cdn_settings.hls_akamai_settings.restart_delay #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.hls_cdn_settings.hls_akamai_settings.salt #=> String + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.hls_cdn_settings.hls_akamai_settings.token #=> String + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.hls_cdn_settings.hls_basic_put_settings.connection_retry_interval #=> Integer + # 
resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.hls_cdn_settings.hls_basic_put_settings.filecache_duration #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.hls_cdn_settings.hls_basic_put_settings.num_retries #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.hls_cdn_settings.hls_basic_put_settings.restart_delay #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.hls_cdn_settings.hls_media_store_settings.connection_retry_interval #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.hls_cdn_settings.hls_media_store_settings.filecache_duration #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.hls_cdn_settings.hls_media_store_settings.media_store_storage_class #=> String, one of "TEMPORAL" + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.hls_cdn_settings.hls_media_store_settings.num_retries #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.hls_cdn_settings.hls_media_store_settings.restart_delay #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.hls_cdn_settings.hls_s3_settings.canned_acl #=> String, one of "AUTHENTICATED_READ", "BUCKET_OWNER_FULL_CONTROL", "BUCKET_OWNER_READ", "PUBLIC_READ" + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.hls_cdn_settings.hls_webdav_settings.connection_retry_interval #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.hls_cdn_settings.hls_webdav_settings.filecache_duration #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.hls_cdn_settings.hls_webdav_settings.http_transfer_mode #=> String, one of "CHUNKED", "NON_CHUNKED" + # 
resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.hls_cdn_settings.hls_webdav_settings.num_retries #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.hls_cdn_settings.hls_webdav_settings.restart_delay #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.hls_id_3_segment_tagging #=> String, one of "DISABLED", "ENABLED" + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.i_frame_only_playlists #=> String, one of "DISABLED", "STANDARD" + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.incomplete_segment_behavior #=> String, one of "AUTO", "SUPPRESS" + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.index_n_segments #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.input_loss_action #=> String, one of "EMIT_OUTPUT", "PAUSE_OUTPUT" + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.iv_in_manifest #=> String, one of "EXCLUDE", "INCLUDE" + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.iv_source #=> String, one of "EXPLICIT", "FOLLOWS_SEGMENT_NUMBER" + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.keep_segments #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.key_format #=> String + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.key_format_versions #=> String + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.key_provider_settings.static_key_settings.key_provider_server.password_param #=> String + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.key_provider_settings.static_key_settings.key_provider_server.uri #=> String + # 
resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.key_provider_settings.static_key_settings.key_provider_server.username #=> String + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.key_provider_settings.static_key_settings.static_key_value #=> String + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.manifest_compression #=> String, one of "GZIP", "NONE" + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.manifest_duration_format #=> String, one of "FLOATING_POINT", "INTEGER" + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.min_segment_length #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.mode #=> String, one of "LIVE", "VOD" + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.output_selection #=> String, one of "MANIFESTS_AND_SEGMENTS", "SEGMENTS_ONLY", "VARIANT_MANIFESTS_AND_SEGMENTS" + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.program_date_time #=> String, one of "EXCLUDE", "INCLUDE" + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.program_date_time_clock #=> String, one of "INITIALIZE_FROM_OUTPUT_TIMECODE", "SYSTEM_CLOCK" + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.program_date_time_period #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.redundant_manifest #=> String, one of "DISABLED", "ENABLED" + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.segment_length #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.segmentation_mode #=> String, one of "USE_INPUT_SEGMENTATION", "USE_SEGMENT_DURATION" + # 
resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.segments_per_subdirectory #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.stream_inf_resolution #=> String, one of "EXCLUDE", "INCLUDE" + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.timed_metadata_id_3_frame #=> String, one of "NONE", "PRIV", "TDRL" + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.timed_metadata_id_3_period #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.timestamp_delta_milliseconds #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.hls_group_settings.ts_file_mode #=> String, one of "SEGMENTED_FILES", "SINGLE_FILE" + # resp.encoder_settings.output_groups[0].output_group_settings.media_package_group_settings.destination.destination_ref_id #=> String + # resp.encoder_settings.output_groups[0].output_group_settings.ms_smooth_group_settings.acquisition_point_id #=> String + # resp.encoder_settings.output_groups[0].output_group_settings.ms_smooth_group_settings.audio_only_timecode_control #=> String, one of "PASSTHROUGH", "USE_CONFIGURED_CLOCK" + # resp.encoder_settings.output_groups[0].output_group_settings.ms_smooth_group_settings.certificate_mode #=> String, one of "SELF_SIGNED", "VERIFY_AUTHENTICITY" + # resp.encoder_settings.output_groups[0].output_group_settings.ms_smooth_group_settings.connection_retry_interval #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.ms_smooth_group_settings.destination.destination_ref_id #=> String + # resp.encoder_settings.output_groups[0].output_group_settings.ms_smooth_group_settings.event_id #=> String + # resp.encoder_settings.output_groups[0].output_group_settings.ms_smooth_group_settings.event_id_mode #=> String, one of "NO_EVENT_ID", "USE_CONFIGURED", "USE_TIMESTAMP" + # 
resp.encoder_settings.output_groups[0].output_group_settings.ms_smooth_group_settings.event_stop_behavior #=> String, one of "NONE", "SEND_EOS" + # resp.encoder_settings.output_groups[0].output_group_settings.ms_smooth_group_settings.filecache_duration #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.ms_smooth_group_settings.fragment_length #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.ms_smooth_group_settings.input_loss_action #=> String, one of "EMIT_OUTPUT", "PAUSE_OUTPUT" + # resp.encoder_settings.output_groups[0].output_group_settings.ms_smooth_group_settings.num_retries #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.ms_smooth_group_settings.restart_delay #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.ms_smooth_group_settings.segmentation_mode #=> String, one of "USE_INPUT_SEGMENTATION", "USE_SEGMENT_DURATION" + # resp.encoder_settings.output_groups[0].output_group_settings.ms_smooth_group_settings.send_delay_ms #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.ms_smooth_group_settings.sparse_track_type #=> String, one of "NONE", "SCTE_35", "SCTE_35_WITHOUT_SEGMENTATION" + # resp.encoder_settings.output_groups[0].output_group_settings.ms_smooth_group_settings.stream_manifest_behavior #=> String, one of "DO_NOT_SEND", "SEND" + # resp.encoder_settings.output_groups[0].output_group_settings.ms_smooth_group_settings.timestamp_offset #=> String + # resp.encoder_settings.output_groups[0].output_group_settings.ms_smooth_group_settings.timestamp_offset_mode #=> String, one of "USE_CONFIGURED_OFFSET", "USE_EVENT_START_DATE" + # resp.encoder_settings.output_groups[0].output_group_settings.rtmp_group_settings.ad_markers #=> Array + # resp.encoder_settings.output_groups[0].output_group_settings.rtmp_group_settings.ad_markers[0] #=> String, one of "ON_CUE_POINT_SCTE35" + # 
resp.encoder_settings.output_groups[0].output_group_settings.rtmp_group_settings.authentication_scheme #=> String, one of "AKAMAI", "COMMON" + # resp.encoder_settings.output_groups[0].output_group_settings.rtmp_group_settings.cache_full_behavior #=> String, one of "DISCONNECT_IMMEDIATELY", "WAIT_FOR_SERVER" + # resp.encoder_settings.output_groups[0].output_group_settings.rtmp_group_settings.cache_length #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.rtmp_group_settings.caption_data #=> String, one of "ALL", "FIELD1_608", "FIELD1_AND_FIELD2_608" + # resp.encoder_settings.output_groups[0].output_group_settings.rtmp_group_settings.input_loss_action #=> String, one of "EMIT_OUTPUT", "PAUSE_OUTPUT" + # resp.encoder_settings.output_groups[0].output_group_settings.rtmp_group_settings.restart_delay #=> Integer + # resp.encoder_settings.output_groups[0].output_group_settings.rtmp_group_settings.include_filler_nal_units #=> String, one of "AUTO", "DROP", "INCLUDE" + # resp.encoder_settings.output_groups[0].output_group_settings.udp_group_settings.input_loss_action #=> String, one of "DROP_PROGRAM", "DROP_TS", "EMIT_PROGRAM" + # resp.encoder_settings.output_groups[0].output_group_settings.udp_group_settings.timed_metadata_id_3_frame #=> String, one of "NONE", "PRIV", "TDRL" + # resp.encoder_settings.output_groups[0].output_group_settings.udp_group_settings.timed_metadata_id_3_period #=> Integer + # resp.encoder_settings.output_groups[0].outputs #=> Array + # resp.encoder_settings.output_groups[0].outputs[0].audio_description_names #=> Array + # resp.encoder_settings.output_groups[0].outputs[0].audio_description_names[0] #=> String + # resp.encoder_settings.output_groups[0].outputs[0].caption_description_names #=> Array + # resp.encoder_settings.output_groups[0].outputs[0].caption_description_names[0] #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_name #=> String + # 
resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.absent_input_audio_behavior #=> String, one of "DROP", "ENCODE_SILENCE" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.arib #=> String, one of "DISABLED", "ENABLED" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.arib_captions_pid #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.arib_captions_pid_control #=> String, one of "AUTO", "USE_CONFIGURED" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.audio_buffer_model #=> String, one of "ATSC", "DVB" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.audio_frames_per_pes #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.audio_pids #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.audio_stream_type #=> String, one of "ATSC", "DVB" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.bitrate #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.buffer_model #=> String, one of "MULTIPLEX", "NONE" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.cc_descriptor #=> String, one of "DISABLED", "ENABLED" + # 
resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.dvb_nit_settings.network_id #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.dvb_nit_settings.network_name #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.dvb_nit_settings.rep_interval #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.dvb_sdt_settings.output_sdt #=> String, one of "SDT_FOLLOW", "SDT_FOLLOW_IF_PRESENT", "SDT_MANUAL", "SDT_NONE" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.dvb_sdt_settings.rep_interval #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.dvb_sdt_settings.service_name #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.dvb_sdt_settings.service_provider_name #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.dvb_sub_pids #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.dvb_tdt_settings.rep_interval #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.dvb_teletext_pid #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.ebif #=> String, one of "NONE", "PASSTHROUGH" + # 
resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.ebp_audio_interval #=> String, one of "VIDEO_AND_FIXED_INTERVALS", "VIDEO_INTERVAL" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.ebp_lookahead_ms #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.ebp_placement #=> String, one of "VIDEO_AND_AUDIO_PIDS", "VIDEO_PID" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.ecm_pid #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.es_rate_in_pes #=> String, one of "EXCLUDE", "INCLUDE" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.etv_platform_pid #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.etv_signal_pid #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.fragment_time #=> Float + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.klv #=> String, one of "NONE", "PASSTHROUGH" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.klv_data_pids #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.nielsen_id_3_behavior #=> String, one of "NO_PASSTHROUGH", "PASSTHROUGH" + # 
resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.null_packet_bitrate #=> Float + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.pat_interval #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.pcr_control #=> String, one of "CONFIGURED_PCR_PERIOD", "PCR_EVERY_PES_PACKET" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.pcr_period #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.pcr_pid #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.pmt_interval #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.pmt_pid #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.program_num #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.rate_mode #=> String, one of "CBR", "VBR" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.scte_27_pids #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.scte_35_control #=> String, one of "NONE", "PASSTHROUGH" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.scte_35_pid #=> String + # 
resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.scte_35_preroll_pullup_milliseconds #=> Float + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.segmentation_markers #=> String, one of "EBP", "EBP_LEGACY", "NONE", "PSI_SEGSTART", "RAI_ADAPT", "RAI_SEGSTART" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.segmentation_style #=> String, one of "MAINTAIN_CADENCE", "RESET_CADENCE" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.segmentation_time #=> Float + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.timed_metadata_behavior #=> String, one of "NO_PASSTHROUGH", "PASSTHROUGH" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.timed_metadata_pid #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.transport_stream_id #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.container_settings.m2ts_settings.video_pid #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.extension #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.archive_output_settings.name_modifier #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.frame_capture_output_settings.name_modifier #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.h265_packaging_type #=> String, one of "HEV1", "HVC1" + # 
resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.audio_only_hls_settings.audio_group_id #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.audio_only_hls_settings.audio_only_image.password_param #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.audio_only_hls_settings.audio_only_image.uri #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.audio_only_hls_settings.audio_only_image.username #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.audio_only_hls_settings.audio_track_type #=> String, one of "ALTERNATE_AUDIO_AUTO_SELECT", "ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT", "ALTERNATE_AUDIO_NOT_AUTO_SELECT", "AUDIO_ONLY_VARIANT_STREAM" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.audio_only_hls_settings.segment_type #=> String, one of "AAC", "FMP4" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.fmp_4_hls_settings.audio_rendition_sets #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.fmp_4_hls_settings.nielsen_id_3_behavior #=> String, one of "NO_PASSTHROUGH", "PASSTHROUGH" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.fmp_4_hls_settings.timed_metadata_behavior #=> String, one of "NO_PASSTHROUGH", "PASSTHROUGH" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.standard_hls_settings.audio_rendition_sets #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.standard_hls_settings.m3u_8_settings.audio_frames_per_pes #=> Integer + # 
resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.standard_hls_settings.m3u_8_settings.audio_pids #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.standard_hls_settings.m3u_8_settings.ecm_pid #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.standard_hls_settings.m3u_8_settings.nielsen_id_3_behavior #=> String, one of "NO_PASSTHROUGH", "PASSTHROUGH" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.standard_hls_settings.m3u_8_settings.pat_interval #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.standard_hls_settings.m3u_8_settings.pcr_control #=> String, one of "CONFIGURED_PCR_PERIOD", "PCR_EVERY_PES_PACKET" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.standard_hls_settings.m3u_8_settings.pcr_period #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.standard_hls_settings.m3u_8_settings.pcr_pid #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.standard_hls_settings.m3u_8_settings.pmt_interval #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.standard_hls_settings.m3u_8_settings.pmt_pid #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.standard_hls_settings.m3u_8_settings.program_num #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.standard_hls_settings.m3u_8_settings.scte_35_behavior #=> String, one of "NO_PASSTHROUGH", "PASSTHROUGH" + # 
resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.standard_hls_settings.m3u_8_settings.scte_35_pid #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.standard_hls_settings.m3u_8_settings.timed_metadata_behavior #=> String, one of "NO_PASSTHROUGH", "PASSTHROUGH" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.standard_hls_settings.m3u_8_settings.timed_metadata_pid #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.standard_hls_settings.m3u_8_settings.transport_stream_id #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.standard_hls_settings.m3u_8_settings.video_pid #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.standard_hls_settings.m3u_8_settings.klv_behavior #=> String, one of "NO_PASSTHROUGH", "PASSTHROUGH" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.hls_settings.standard_hls_settings.m3u_8_settings.klv_data_pids #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.name_modifier #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.hls_output_settings.segment_modifier #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.ms_smooth_output_settings.h265_packaging_type #=> String, one of "HEV1", "HVC1" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.ms_smooth_output_settings.name_modifier #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.multiplex_output_settings.destination.destination_ref_id #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.rtmp_output_settings.certificate_mode #=> 
String, one of "SELF_SIGNED", "VERIFY_AUTHENTICITY" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.rtmp_output_settings.connection_retry_interval #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.rtmp_output_settings.destination.destination_ref_id #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.rtmp_output_settings.num_retries #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.buffer_msec #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.absent_input_audio_behavior #=> String, one of "DROP", "ENCODE_SILENCE" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.arib #=> String, one of "DISABLED", "ENABLED" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.arib_captions_pid #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.arib_captions_pid_control #=> String, one of "AUTO", "USE_CONFIGURED" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.audio_buffer_model #=> String, one of "ATSC", "DVB" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.audio_frames_per_pes #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.audio_pids #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.audio_stream_type #=> String, one of "ATSC", "DVB" + # 
resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.bitrate #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.buffer_model #=> String, one of "MULTIPLEX", "NONE" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.cc_descriptor #=> String, one of "DISABLED", "ENABLED" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.dvb_nit_settings.network_id #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.dvb_nit_settings.network_name #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.dvb_nit_settings.rep_interval #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.dvb_sdt_settings.output_sdt #=> String, one of "SDT_FOLLOW", "SDT_FOLLOW_IF_PRESENT", "SDT_MANUAL", "SDT_NONE" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.dvb_sdt_settings.rep_interval #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.dvb_sdt_settings.service_name #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.dvb_sdt_settings.service_provider_name #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.dvb_sub_pids #=> String + # 
resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.dvb_tdt_settings.rep_interval #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.dvb_teletext_pid #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.ebif #=> String, one of "NONE", "PASSTHROUGH" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.ebp_audio_interval #=> String, one of "VIDEO_AND_FIXED_INTERVALS", "VIDEO_INTERVAL" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.ebp_lookahead_ms #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.ebp_placement #=> String, one of "VIDEO_AND_AUDIO_PIDS", "VIDEO_PID" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.ecm_pid #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.es_rate_in_pes #=> String, one of "EXCLUDE", "INCLUDE" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.etv_platform_pid #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.etv_signal_pid #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.fragment_time #=> Float + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.klv #=> String, one of "NONE", "PASSTHROUGH" + # 
resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.klv_data_pids #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.nielsen_id_3_behavior #=> String, one of "NO_PASSTHROUGH", "PASSTHROUGH" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.null_packet_bitrate #=> Float + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.pat_interval #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.pcr_control #=> String, one of "CONFIGURED_PCR_PERIOD", "PCR_EVERY_PES_PACKET" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.pcr_period #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.pcr_pid #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.pmt_interval #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.pmt_pid #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.program_num #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.rate_mode #=> String, one of "CBR", "VBR" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.scte_27_pids #=> String + # 
resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.scte_35_control #=> String, one of "NONE", "PASSTHROUGH" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.scte_35_pid #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.scte_35_preroll_pullup_milliseconds #=> Float + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.segmentation_markers #=> String, one of "EBP", "EBP_LEGACY", "NONE", "PSI_SEGSTART", "RAI_ADAPT", "RAI_SEGSTART" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.segmentation_style #=> String, one of "MAINTAIN_CADENCE", "RESET_CADENCE" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.segmentation_time #=> Float + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.timed_metadata_behavior #=> String, one of "NO_PASSTHROUGH", "PASSTHROUGH" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.timed_metadata_pid #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.transport_stream_id #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.container_settings.m2ts_settings.video_pid #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.destination.destination_ref_id #=> String + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.fec_output_settings.column_depth #=> Integer + # 
resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.fec_output_settings.include_fec #=> String, one of "COLUMN", "COLUMN_AND_ROW" + # resp.encoder_settings.output_groups[0].outputs[0].output_settings.udp_output_settings.fec_output_settings.row_length #=> Integer + # resp.encoder_settings.output_groups[0].outputs[0].video_description_name #=> String + # resp.encoder_settings.timecode_config.source #=> String, one of "EMBEDDED", "SYSTEMCLOCK", "ZEROBASED" + # resp.encoder_settings.timecode_config.sync_threshold #=> Integer + # resp.encoder_settings.video_descriptions #=> Array + # resp.encoder_settings.video_descriptions[0].codec_settings.frame_capture_settings.capture_interval #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.frame_capture_settings.capture_interval_units #=> String, one of "MILLISECONDS", "SECONDS" + # resp.encoder_settings.video_descriptions[0].codec_settings.frame_capture_settings.timecode_burnin_settings.font_size #=> String, one of "EXTRA_SMALL_10", "LARGE_48", "MEDIUM_32", "SMALL_16" + # resp.encoder_settings.video_descriptions[0].codec_settings.frame_capture_settings.timecode_burnin_settings.position #=> String, one of "BOTTOM_CENTER", "BOTTOM_LEFT", "BOTTOM_RIGHT", "MIDDLE_CENTER", "MIDDLE_LEFT", "MIDDLE_RIGHT", "TOP_CENTER", "TOP_LEFT", "TOP_RIGHT" + # resp.encoder_settings.video_descriptions[0].codec_settings.frame_capture_settings.timecode_burnin_settings.prefix #=> String + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.adaptive_quantization #=> String, one of "AUTO", "HIGH", "HIGHER", "LOW", "MAX", "MEDIUM", "OFF" + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.afd_signaling #=> String, one of "AUTO", "FIXED", "NONE" + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.bitrate #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.buf_fill_pct #=> Integer + # 
resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.buf_size #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.color_metadata #=> String, one of "IGNORE", "INSERT" + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.entropy_encoding #=> String, one of "CABAC", "CAVLC" + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.filter_settings.temporal_filter_settings.post_filter_sharpening #=> String, one of "AUTO", "DISABLED", "ENABLED" + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.filter_settings.temporal_filter_settings.strength #=> String, one of "AUTO", "STRENGTH_1", "STRENGTH_2", "STRENGTH_3", "STRENGTH_4", "STRENGTH_5", "STRENGTH_6", "STRENGTH_7", "STRENGTH_8", "STRENGTH_9", "STRENGTH_10", "STRENGTH_11", "STRENGTH_12", "STRENGTH_13", "STRENGTH_14", "STRENGTH_15", "STRENGTH_16" + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.fixed_afd #=> String, one of "AFD_0000", "AFD_0010", "AFD_0011", "AFD_0100", "AFD_1000", "AFD_1001", "AFD_1010", "AFD_1011", "AFD_1101", "AFD_1110", "AFD_1111" + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.flicker_aq #=> String, one of "DISABLED", "ENABLED" + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.force_field_pictures #=> String, one of "DISABLED", "ENABLED" + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.framerate_control #=> String, one of "INITIALIZE_FROM_SOURCE", "SPECIFIED" + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.framerate_denominator #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.framerate_numerator #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.gop_b_reference #=> String, one of "DISABLED", "ENABLED" + # 
resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.gop_closed_cadence #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.gop_num_b_frames #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.gop_size #=> Float + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.gop_size_units #=> String, one of "FRAMES", "SECONDS" + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.level #=> String, one of "H264_LEVEL_1", "H264_LEVEL_1_1", "H264_LEVEL_1_2", "H264_LEVEL_1_3", "H264_LEVEL_2", "H264_LEVEL_2_1", "H264_LEVEL_2_2", "H264_LEVEL_3", "H264_LEVEL_3_1", "H264_LEVEL_3_2", "H264_LEVEL_4", "H264_LEVEL_4_1", "H264_LEVEL_4_2", "H264_LEVEL_5", "H264_LEVEL_5_1", "H264_LEVEL_5_2", "H264_LEVEL_AUTO" + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.look_ahead_rate_control #=> String, one of "HIGH", "LOW", "MEDIUM" + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.max_bitrate #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.min_i_interval #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.num_ref_frames #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.par_control #=> String, one of "INITIALIZE_FROM_SOURCE", "SPECIFIED" + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.par_denominator #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.par_numerator #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.profile #=> String, one of "BASELINE", "HIGH", "HIGH_10BIT", "HIGH_422", "HIGH_422_10BIT", "MAIN" + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.quality_level #=> String, one of "ENHANCED_QUALITY", "STANDARD_QUALITY" + # 
resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.qvbr_quality_level #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.rate_control_mode #=> String, one of "CBR", "MULTIPLEX", "QVBR", "VBR" + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.scan_type #=> String, one of "INTERLACED", "PROGRESSIVE" + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.scene_change_detect #=> String, one of "DISABLED", "ENABLED" + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.slices #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.softness #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.spatial_aq #=> String, one of "DISABLED", "ENABLED" + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.subgop_length #=> String, one of "DYNAMIC", "FIXED" + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.syntax #=> String, one of "DEFAULT", "RP2027" + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.temporal_aq #=> String, one of "DISABLED", "ENABLED" + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.timecode_insertion #=> String, one of "DISABLED", "PIC_TIMING_SEI" + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.timecode_burnin_settings.font_size #=> String, one of "EXTRA_SMALL_10", "LARGE_48", "MEDIUM_32", "SMALL_16" + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.timecode_burnin_settings.position #=> String, one of "BOTTOM_CENTER", "BOTTOM_LEFT", "BOTTOM_RIGHT", "MIDDLE_CENTER", "MIDDLE_LEFT", "MIDDLE_RIGHT", "TOP_CENTER", "TOP_LEFT", "TOP_RIGHT" + # resp.encoder_settings.video_descriptions[0].codec_settings.h264_settings.timecode_burnin_settings.prefix #=> String + # 
resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.adaptive_quantization #=> String, one of "AUTO", "HIGH", "HIGHER", "LOW", "MAX", "MEDIUM", "OFF" + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.afd_signaling #=> String, one of "AUTO", "FIXED", "NONE" + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.alternative_transfer_function #=> String, one of "INSERT", "OMIT" + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.bitrate #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.buf_size #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.color_metadata #=> String, one of "IGNORE", "INSERT" + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.color_space_settings.hdr_10_settings.max_cll #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.color_space_settings.hdr_10_settings.max_fall #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.filter_settings.temporal_filter_settings.post_filter_sharpening #=> String, one of "AUTO", "DISABLED", "ENABLED" + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.filter_settings.temporal_filter_settings.strength #=> String, one of "AUTO", "STRENGTH_1", "STRENGTH_2", "STRENGTH_3", "STRENGTH_4", "STRENGTH_5", "STRENGTH_6", "STRENGTH_7", "STRENGTH_8", "STRENGTH_9", "STRENGTH_10", "STRENGTH_11", "STRENGTH_12", "STRENGTH_13", "STRENGTH_14", "STRENGTH_15", "STRENGTH_16" + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.fixed_afd #=> String, one of "AFD_0000", "AFD_0010", "AFD_0011", "AFD_0100", "AFD_1000", "AFD_1001", "AFD_1010", "AFD_1011", "AFD_1101", "AFD_1110", "AFD_1111" + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.flicker_aq #=> String, one of "DISABLED", "ENABLED" + # 
resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.framerate_denominator #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.framerate_numerator #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.gop_closed_cadence #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.gop_size #=> Float + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.gop_size_units #=> String, one of "FRAMES", "SECONDS" + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.level #=> String, one of "H265_LEVEL_1", "H265_LEVEL_2", "H265_LEVEL_2_1", "H265_LEVEL_3", "H265_LEVEL_3_1", "H265_LEVEL_4", "H265_LEVEL_4_1", "H265_LEVEL_5", "H265_LEVEL_5_1", "H265_LEVEL_5_2", "H265_LEVEL_6", "H265_LEVEL_6_1", "H265_LEVEL_6_2", "H265_LEVEL_AUTO" + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.look_ahead_rate_control #=> String, one of "HIGH", "LOW", "MEDIUM" + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.max_bitrate #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.min_i_interval #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.par_denominator #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.par_numerator #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.profile #=> String, one of "MAIN", "MAIN_10BIT" + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.qvbr_quality_level #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.rate_control_mode #=> String, one of "CBR", "MULTIPLEX", "QVBR" + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.scan_type #=> String, one of "INTERLACED", "PROGRESSIVE" + # 
resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.scene_change_detect #=> String, one of "DISABLED", "ENABLED" + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.slices #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.tier #=> String, one of "HIGH", "MAIN" + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.timecode_insertion #=> String, one of "DISABLED", "PIC_TIMING_SEI" + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.timecode_burnin_settings.font_size #=> String, one of "EXTRA_SMALL_10", "LARGE_48", "MEDIUM_32", "SMALL_16" + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.timecode_burnin_settings.position #=> String, one of "BOTTOM_CENTER", "BOTTOM_LEFT", "BOTTOM_RIGHT", "MIDDLE_CENTER", "MIDDLE_LEFT", "MIDDLE_RIGHT", "TOP_CENTER", "TOP_LEFT", "TOP_RIGHT" + # resp.encoder_settings.video_descriptions[0].codec_settings.h265_settings.timecode_burnin_settings.prefix #=> String + # resp.encoder_settings.video_descriptions[0].codec_settings.mpeg_2_settings.adaptive_quantization #=> String, one of "AUTO", "HIGH", "LOW", "MEDIUM", "OFF" + # resp.encoder_settings.video_descriptions[0].codec_settings.mpeg_2_settings.afd_signaling #=> String, one of "AUTO", "FIXED", "NONE" + # resp.encoder_settings.video_descriptions[0].codec_settings.mpeg_2_settings.color_metadata #=> String, one of "IGNORE", "INSERT" + # resp.encoder_settings.video_descriptions[0].codec_settings.mpeg_2_settings.color_space #=> String, one of "AUTO", "PASSTHROUGH" + # resp.encoder_settings.video_descriptions[0].codec_settings.mpeg_2_settings.display_aspect_ratio #=> String, one of "DISPLAYRATIO16X9", "DISPLAYRATIO4X3" + # resp.encoder_settings.video_descriptions[0].codec_settings.mpeg_2_settings.filter_settings.temporal_filter_settings.post_filter_sharpening #=> String, one of "AUTO", "DISABLED", "ENABLED" + # 
resp.encoder_settings.video_descriptions[0].codec_settings.mpeg_2_settings.filter_settings.temporal_filter_settings.strength #=> String, one of "AUTO", "STRENGTH_1", "STRENGTH_2", "STRENGTH_3", "STRENGTH_4", "STRENGTH_5", "STRENGTH_6", "STRENGTH_7", "STRENGTH_8", "STRENGTH_9", "STRENGTH_10", "STRENGTH_11", "STRENGTH_12", "STRENGTH_13", "STRENGTH_14", "STRENGTH_15", "STRENGTH_16" + # resp.encoder_settings.video_descriptions[0].codec_settings.mpeg_2_settings.fixed_afd #=> String, one of "AFD_0000", "AFD_0010", "AFD_0011", "AFD_0100", "AFD_1000", "AFD_1001", "AFD_1010", "AFD_1011", "AFD_1101", "AFD_1110", "AFD_1111" + # resp.encoder_settings.video_descriptions[0].codec_settings.mpeg_2_settings.framerate_denominator #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.mpeg_2_settings.framerate_numerator #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.mpeg_2_settings.gop_closed_cadence #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.mpeg_2_settings.gop_num_b_frames #=> Integer + # resp.encoder_settings.video_descriptions[0].codec_settings.mpeg_2_settings.gop_size #=> Float + # resp.encoder_settings.video_descriptions[0].codec_settings.mpeg_2_settings.gop_size_units #=> String, one of "FRAMES", "SECONDS" + # resp.encoder_settings.video_descriptions[0].codec_settings.mpeg_2_settings.scan_type #=> String, one of "INTERLACED", "PROGRESSIVE" + # resp.encoder_settings.video_descriptions[0].codec_settings.mpeg_2_settings.subgop_length #=> String, one of "DYNAMIC", "FIXED" + # resp.encoder_settings.video_descriptions[0].codec_settings.mpeg_2_settings.timecode_insertion #=> String, one of "DISABLED", "GOP_TIMECODE" + # resp.encoder_settings.video_descriptions[0].codec_settings.mpeg_2_settings.timecode_burnin_settings.font_size #=> String, one of "EXTRA_SMALL_10", "LARGE_48", "MEDIUM_32", "SMALL_16" + # 
resp.encoder_settings.video_descriptions[0].codec_settings.mpeg_2_settings.timecode_burnin_settings.position #=> String, one of "BOTTOM_CENTER", "BOTTOM_LEFT", "BOTTOM_RIGHT", "MIDDLE_CENTER", "MIDDLE_LEFT", "MIDDLE_RIGHT", "TOP_CENTER", "TOP_LEFT", "TOP_RIGHT" + # resp.encoder_settings.video_descriptions[0].codec_settings.mpeg_2_settings.timecode_burnin_settings.prefix #=> String + # resp.encoder_settings.video_descriptions[0].height #=> Integer + # resp.encoder_settings.video_descriptions[0].name #=> String + # resp.encoder_settings.video_descriptions[0].respond_to_afd #=> String, one of "NONE", "PASSTHROUGH", "RESPOND" + # resp.encoder_settings.video_descriptions[0].scaling_behavior #=> String, one of "DEFAULT", "STRETCH_TO_OUTPUT" + # resp.encoder_settings.video_descriptions[0].sharpness #=> Integer + # resp.encoder_settings.video_descriptions[0].width #=> Integer + # resp.encoder_settings.thumbnail_configuration.state #=> String, one of "AUTO", "DISABLED" + # resp.encoder_settings.color_correction_settings.global_color_corrections #=> Array + # resp.encoder_settings.color_correction_settings.global_color_corrections[0].input_color_space #=> String, one of "HDR10", "HLG_2020", "REC_601", "REC_709" + # resp.encoder_settings.color_correction_settings.global_color_corrections[0].output_color_space #=> String, one of "HDR10", "HLG_2020", "REC_601", "REC_709" + # resp.encoder_settings.color_correction_settings.global_color_corrections[0].uri #=> String + # resp.id #=> String + # resp.input_attachments #=> Array + # resp.input_attachments[0].automatic_input_failover_settings.error_clear_time_msec #=> Integer + # resp.input_attachments[0].automatic_input_failover_settings.failover_conditions #=> Array + # resp.input_attachments[0].automatic_input_failover_settings.failover_conditions[0].failover_condition_settings.audio_silence_settings.audio_selector_name #=> String + # 
resp.input_attachments[0].automatic_input_failover_settings.failover_conditions[0].failover_condition_settings.audio_silence_settings.audio_silence_threshold_msec #=> Integer + # resp.input_attachments[0].automatic_input_failover_settings.failover_conditions[0].failover_condition_settings.input_loss_settings.input_loss_threshold_msec #=> Integer + # resp.input_attachments[0].automatic_input_failover_settings.failover_conditions[0].failover_condition_settings.video_black_settings.black_detect_threshold #=> Float + # resp.input_attachments[0].automatic_input_failover_settings.failover_conditions[0].failover_condition_settings.video_black_settings.video_black_threshold_msec #=> Integer + # resp.input_attachments[0].automatic_input_failover_settings.input_preference #=> String, one of "EQUAL_INPUT_PREFERENCE", "PRIMARY_INPUT_PREFERRED" + # resp.input_attachments[0].automatic_input_failover_settings.secondary_input_id #=> String + # resp.input_attachments[0].input_attachment_name #=> String + # resp.input_attachments[0].input_id #=> String + # resp.input_attachments[0].input_settings.audio_selectors #=> Array + # resp.input_attachments[0].input_settings.audio_selectors[0].name #=> String + # resp.input_attachments[0].input_settings.audio_selectors[0].selector_settings.audio_hls_rendition_selection.group_id #=> String + # resp.input_attachments[0].input_settings.audio_selectors[0].selector_settings.audio_hls_rendition_selection.name #=> String + # resp.input_attachments[0].input_settings.audio_selectors[0].selector_settings.audio_language_selection.language_code #=> String + # resp.input_attachments[0].input_settings.audio_selectors[0].selector_settings.audio_language_selection.language_selection_policy #=> String, one of "LOOSE", "STRICT" + # resp.input_attachments[0].input_settings.audio_selectors[0].selector_settings.audio_pid_selection.pid #=> Integer + # resp.input_attachments[0].input_settings.audio_selectors[0].selector_settings.audio_track_selection.tracks #=> 
Array + # resp.input_attachments[0].input_settings.audio_selectors[0].selector_settings.audio_track_selection.tracks[0].track #=> Integer + # resp.input_attachments[0].input_settings.audio_selectors[0].selector_settings.audio_track_selection.dolby_e_decode.program_selection #=> String, one of "ALL_CHANNELS", "PROGRAM_1", "PROGRAM_2", "PROGRAM_3", "PROGRAM_4", "PROGRAM_5", "PROGRAM_6", "PROGRAM_7", "PROGRAM_8" + # resp.input_attachments[0].input_settings.caption_selectors #=> Array + # resp.input_attachments[0].input_settings.caption_selectors[0].language_code #=> String + # resp.input_attachments[0].input_settings.caption_selectors[0].name #=> String + # resp.input_attachments[0].input_settings.caption_selectors[0].selector_settings.ancillary_source_settings.source_ancillary_channel_number #=> Integer + # resp.input_attachments[0].input_settings.caption_selectors[0].selector_settings.dvb_sub_source_settings.ocr_language #=> String, one of "DEU", "ENG", "FRA", "NLD", "POR", "SPA" + # resp.input_attachments[0].input_settings.caption_selectors[0].selector_settings.dvb_sub_source_settings.pid #=> Integer + # resp.input_attachments[0].input_settings.caption_selectors[0].selector_settings.embedded_source_settings.convert_608_to_708 #=> String, one of "DISABLED", "UPCONVERT" + # resp.input_attachments[0].input_settings.caption_selectors[0].selector_settings.embedded_source_settings.scte_20_detection #=> String, one of "AUTO", "OFF" + # resp.input_attachments[0].input_settings.caption_selectors[0].selector_settings.embedded_source_settings.source_608_channel_number #=> Integer + # resp.input_attachments[0].input_settings.caption_selectors[0].selector_settings.embedded_source_settings.source_608_track_number #=> Integer + # resp.input_attachments[0].input_settings.caption_selectors[0].selector_settings.scte_20_source_settings.convert_608_to_708 #=> String, one of "DISABLED", "UPCONVERT" + # 
resp.input_attachments[0].input_settings.caption_selectors[0].selector_settings.scte_20_source_settings.source_608_channel_number #=> Integer + # resp.input_attachments[0].input_settings.caption_selectors[0].selector_settings.scte_27_source_settings.ocr_language #=> String, one of "DEU", "ENG", "FRA", "NLD", "POR", "SPA" + # resp.input_attachments[0].input_settings.caption_selectors[0].selector_settings.scte_27_source_settings.pid #=> Integer + # resp.input_attachments[0].input_settings.caption_selectors[0].selector_settings.teletext_source_settings.output_rectangle.height #=> Float + # resp.input_attachments[0].input_settings.caption_selectors[0].selector_settings.teletext_source_settings.output_rectangle.left_offset #=> Float + # resp.input_attachments[0].input_settings.caption_selectors[0].selector_settings.teletext_source_settings.output_rectangle.top_offset #=> Float + # resp.input_attachments[0].input_settings.caption_selectors[0].selector_settings.teletext_source_settings.output_rectangle.width #=> Float + # resp.input_attachments[0].input_settings.caption_selectors[0].selector_settings.teletext_source_settings.page_number #=> String + # resp.input_attachments[0].input_settings.deblock_filter #=> String, one of "DISABLED", "ENABLED" + # resp.input_attachments[0].input_settings.denoise_filter #=> String, one of "DISABLED", "ENABLED" + # resp.input_attachments[0].input_settings.filter_strength #=> Integer + # resp.input_attachments[0].input_settings.input_filter #=> String, one of "AUTO", "DISABLED", "FORCED" + # resp.input_attachments[0].input_settings.network_input_settings.hls_input_settings.bandwidth #=> Integer + # resp.input_attachments[0].input_settings.network_input_settings.hls_input_settings.buffer_segments #=> Integer + # resp.input_attachments[0].input_settings.network_input_settings.hls_input_settings.retries #=> Integer + # resp.input_attachments[0].input_settings.network_input_settings.hls_input_settings.retry_interval #=> Integer + # 
resp.input_attachments[0].input_settings.network_input_settings.hls_input_settings.scte_35_source #=> String, one of "MANIFEST", "SEGMENTS" + # resp.input_attachments[0].input_settings.network_input_settings.server_validation #=> String, one of "CHECK_CRYPTOGRAPHY_AND_VALIDATE_NAME", "CHECK_CRYPTOGRAPHY_ONLY" + # resp.input_attachments[0].input_settings.scte_35_pid #=> Integer + # resp.input_attachments[0].input_settings.smpte_2038_data_preference #=> String, one of "IGNORE", "PREFER" + # resp.input_attachments[0].input_settings.source_end_behavior #=> String, one of "CONTINUE", "LOOP" + # resp.input_attachments[0].input_settings.video_selector.color_space #=> String, one of "FOLLOW", "HDR10", "HLG_2020", "REC_601", "REC_709" + # resp.input_attachments[0].input_settings.video_selector.color_space_settings.hdr_10_settings.max_cll #=> Integer + # resp.input_attachments[0].input_settings.video_selector.color_space_settings.hdr_10_settings.max_fall #=> Integer + # resp.input_attachments[0].input_settings.video_selector.color_space_usage #=> String, one of "FALLBACK", "FORCE" + # resp.input_attachments[0].input_settings.video_selector.selector_settings.video_selector_pid.pid #=> Integer + # resp.input_attachments[0].input_settings.video_selector.selector_settings.video_selector_program_id.program_id #=> Integer + # resp.input_specification.codec #=> String, one of "MPEG2", "AVC", "HEVC" + # resp.input_specification.maximum_bitrate #=> String, one of "MAX_10_MBPS", "MAX_20_MBPS", "MAX_50_MBPS" + # resp.input_specification.resolution #=> String, one of "SD", "HD", "UHD" + # resp.log_level #=> String, one of "ERROR", "WARNING", "INFO", "DEBUG", "DISABLED" + # resp.maintenance.maintenance_day #=> String, one of "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY" + # resp.maintenance.maintenance_deadline #=> String + # resp.maintenance.maintenance_scheduled_date #=> String + # resp.maintenance.maintenance_start_time #=> String + # 
resp.maintenance_status #=> String + # resp.name #=> String + # resp.pipeline_details #=> Array + # resp.pipeline_details[0].active_input_attachment_name #=> String + # resp.pipeline_details[0].active_input_switch_action_name #=> String + # resp.pipeline_details[0].active_motion_graphics_action_name #=> String + # resp.pipeline_details[0].active_motion_graphics_uri #=> String + # resp.pipeline_details[0].pipeline_id #=> String + # resp.pipelines_running_count #=> Integer + # resp.role_arn #=> String + # resp.state #=> String, one of "CREATING", "CREATE_FAILED", "IDLE", "STARTING", "RUNNING", "RECOVERING", "STOPPING", "DELETING", "DELETED", "UPDATING", "UPDATE_FAILED" + # resp.tags #=> Hash + # resp.tags["__string"] #=> String + # resp.vpc.availability_zones #=> Array + # resp.vpc.availability_zones[0] #=> String + # resp.vpc.network_interface_ids #=> Array + # resp.vpc.network_interface_ids[0] #=> String + # resp.vpc.security_group_ids #=> Array + # resp.vpc.security_group_ids[0] #=> String + # resp.vpc.subnet_ids #=> Array + # resp.vpc.subnet_ids[0] #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/RestartChannelPipelines AWS API Documentation + # + # @overload restart_channel_pipelines(params = {}) + # @param [Hash] params ({}) + def restart_channel_pipelines(params = {}, options = {}) + req = build_request(:restart_channel_pipelines, params) + req.send_request(options) + end + # @!endgroup # @param params ({}) @@ -8718,7 +9430,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-medialive' - context[:gem_version] = '1.115.0' + context[:gem_version] = '1.116.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-medialive/lib/aws-sdk-medialive/client_api.rb b/gems/aws-sdk-medialive/lib/aws-sdk-medialive/client_api.rb index 9a20df23625..0d93b9d9819 100644 --- a/gems/aws-sdk-medialive/lib/aws-sdk-medialive/client_api.rb +++ 
b/gems/aws-sdk-medialive/lib/aws-sdk-medialive/client_api.rb @@ -120,6 +120,7 @@ module ClientApi ChannelClass = Shapes::StringShape.new(name: 'ChannelClass') ChannelConfigurationValidationError = Shapes::StructureShape.new(name: 'ChannelConfigurationValidationError') ChannelEgressEndpoint = Shapes::StructureShape.new(name: 'ChannelEgressEndpoint') + ChannelPipelineIdToRestart = Shapes::StringShape.new(name: 'ChannelPipelineIdToRestart') ChannelState = Shapes::StringShape.new(name: 'ChannelState') ChannelSummary = Shapes::StructureShape.new(name: 'ChannelSummary') ClaimDeviceRequest = Shapes::StructureShape.new(name: 'ClaimDeviceRequest') @@ -591,6 +592,8 @@ module ClientApi ReservationVideoQuality = Shapes::StringShape.new(name: 'ReservationVideoQuality') ResourceConflict = Shapes::StructureShape.new(name: 'ResourceConflict') ResourceNotFound = Shapes::StructureShape.new(name: 'ResourceNotFound') + RestartChannelPipelinesRequest = Shapes::StructureShape.new(name: 'RestartChannelPipelinesRequest') + RestartChannelPipelinesResponse = Shapes::StructureShape.new(name: 'RestartChannelPipelinesResponse') RtmpAdMarkers = Shapes::StringShape.new(name: 'RtmpAdMarkers') RtmpCacheFullBehavior = Shapes::StringShape.new(name: 'RtmpCacheFullBehavior') RtmpCaptionData = Shapes::StringShape.new(name: 'RtmpCaptionData') @@ -820,6 +823,7 @@ module ClientApi __listOfCaptionLanguageMapping = Shapes::ListShape.new(name: '__listOfCaptionLanguageMapping') __listOfCaptionSelector = Shapes::ListShape.new(name: '__listOfCaptionSelector') __listOfChannelEgressEndpoint = Shapes::ListShape.new(name: '__listOfChannelEgressEndpoint') + __listOfChannelPipelineIdToRestart = Shapes::ListShape.new(name: '__listOfChannelPipelineIdToRestart') __listOfChannelSummary = Shapes::ListShape.new(name: '__listOfChannelSummary') __listOfColorCorrection = Shapes::ListShape.new(name: '__listOfColorCorrection') __listOfFailoverCondition = Shapes::ListShape.new(name: '__listOfFailoverCondition') @@ -2923,6 
+2927,31 @@ module ClientApi ResourceNotFound.add_member(:message, Shapes::ShapeRef.new(shape: __string, location_name: "message")) ResourceNotFound.struct_class = Types::ResourceNotFound + RestartChannelPipelinesRequest.add_member(:channel_id, Shapes::ShapeRef.new(shape: __string, required: true, location: "uri", location_name: "channelId")) + RestartChannelPipelinesRequest.add_member(:pipeline_ids, Shapes::ShapeRef.new(shape: __listOfChannelPipelineIdToRestart, location_name: "pipelineIds")) + RestartChannelPipelinesRequest.struct_class = Types::RestartChannelPipelinesRequest + + RestartChannelPipelinesResponse.add_member(:arn, Shapes::ShapeRef.new(shape: __string, location_name: "arn")) + RestartChannelPipelinesResponse.add_member(:cdi_input_specification, Shapes::ShapeRef.new(shape: CdiInputSpecification, location_name: "cdiInputSpecification")) + RestartChannelPipelinesResponse.add_member(:channel_class, Shapes::ShapeRef.new(shape: ChannelClass, location_name: "channelClass")) + RestartChannelPipelinesResponse.add_member(:destinations, Shapes::ShapeRef.new(shape: __listOfOutputDestination, location_name: "destinations")) + RestartChannelPipelinesResponse.add_member(:egress_endpoints, Shapes::ShapeRef.new(shape: __listOfChannelEgressEndpoint, location_name: "egressEndpoints")) + RestartChannelPipelinesResponse.add_member(:encoder_settings, Shapes::ShapeRef.new(shape: EncoderSettings, location_name: "encoderSettings")) + RestartChannelPipelinesResponse.add_member(:id, Shapes::ShapeRef.new(shape: __string, location_name: "id")) + RestartChannelPipelinesResponse.add_member(:input_attachments, Shapes::ShapeRef.new(shape: __listOfInputAttachment, location_name: "inputAttachments")) + RestartChannelPipelinesResponse.add_member(:input_specification, Shapes::ShapeRef.new(shape: InputSpecification, location_name: "inputSpecification")) + RestartChannelPipelinesResponse.add_member(:log_level, Shapes::ShapeRef.new(shape: LogLevel, location_name: "logLevel")) + 
RestartChannelPipelinesResponse.add_member(:maintenance, Shapes::ShapeRef.new(shape: MaintenanceStatus, location_name: "maintenance")) + RestartChannelPipelinesResponse.add_member(:maintenance_status, Shapes::ShapeRef.new(shape: __string, location_name: "maintenanceStatus")) + RestartChannelPipelinesResponse.add_member(:name, Shapes::ShapeRef.new(shape: __string, location_name: "name")) + RestartChannelPipelinesResponse.add_member(:pipeline_details, Shapes::ShapeRef.new(shape: __listOfPipelineDetail, location_name: "pipelineDetails")) + RestartChannelPipelinesResponse.add_member(:pipelines_running_count, Shapes::ShapeRef.new(shape: __integer, location_name: "pipelinesRunningCount")) + RestartChannelPipelinesResponse.add_member(:role_arn, Shapes::ShapeRef.new(shape: __string, location_name: "roleArn")) + RestartChannelPipelinesResponse.add_member(:state, Shapes::ShapeRef.new(shape: ChannelState, location_name: "state")) + RestartChannelPipelinesResponse.add_member(:tags, Shapes::ShapeRef.new(shape: Tags, location_name: "tags")) + RestartChannelPipelinesResponse.add_member(:vpc, Shapes::ShapeRef.new(shape: VpcOutputSettingsDescription, location_name: "vpc")) + RestartChannelPipelinesResponse.struct_class = Types::RestartChannelPipelinesResponse + RtmpCaptionInfoDestinationSettings.struct_class = Types::RtmpCaptionInfoDestinationSettings RtmpGroupSettings.add_member(:ad_markers, Shapes::ShapeRef.new(shape: __listOfRtmpAdMarkers, location_name: "adMarkers")) @@ -3508,6 +3537,8 @@ module ClientApi __listOfChannelEgressEndpoint.member = Shapes::ShapeRef.new(shape: ChannelEgressEndpoint) + __listOfChannelPipelineIdToRestart.member = Shapes::ShapeRef.new(shape: ChannelPipelineIdToRestart) + __listOfChannelSummary.member = Shapes::ShapeRef.new(shape: ChannelSummary) __listOfColorCorrection.member = Shapes::ShapeRef.new(shape: ColorCorrection) @@ -4651,6 +4682,22 @@ module ClientApi o.errors << Shapes::ShapeRef.new(shape: TooManyRequestsException) o.errors << 
Shapes::ShapeRef.new(shape: ConflictException) end) + + api.add_operation(:restart_channel_pipelines, Seahorse::Model::Operation.new.tap do |o| + o.name = "RestartChannelPipelines" + o.http_method = "POST" + o.http_request_uri = "/prod/channels/{channelId}/restartChannelPipelines" + o.input = Shapes::ShapeRef.new(shape: RestartChannelPipelinesRequest) + o.output = Shapes::ShapeRef.new(shape: RestartChannelPipelinesResponse) + o.errors << Shapes::ShapeRef.new(shape: BadRequestException) + o.errors << Shapes::ShapeRef.new(shape: InternalServerErrorException) + o.errors << Shapes::ShapeRef.new(shape: ForbiddenException) + o.errors << Shapes::ShapeRef.new(shape: BadGatewayException) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: GatewayTimeoutException) + o.errors << Shapes::ShapeRef.new(shape: TooManyRequestsException) + o.errors << Shapes::ShapeRef.new(shape: ConflictException) + end) end end diff --git a/gems/aws-sdk-medialive/lib/aws-sdk-medialive/endpoints.rb b/gems/aws-sdk-medialive/lib/aws-sdk-medialive/endpoints.rb index 68aca7b003b..03c469f717d 100644 --- a/gems/aws-sdk-medialive/lib/aws-sdk-medialive/endpoints.rb +++ b/gems/aws-sdk-medialive/lib/aws-sdk-medialive/endpoints.rb @@ -908,5 +908,19 @@ def self.build(context) end end + class RestartChannelPipelines + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::MediaLive::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + end end diff --git a/gems/aws-sdk-medialive/lib/aws-sdk-medialive/plugins/endpoints.rb b/gems/aws-sdk-medialive/lib/aws-sdk-medialive/plugins/endpoints.rb index 93ff9f6d6ab..5b1bf6d3098 100644 --- a/gems/aws-sdk-medialive/lib/aws-sdk-medialive/plugins/endpoints.rb +++ 
b/gems/aws-sdk-medialive/lib/aws-sdk-medialive/plugins/endpoints.rb @@ -186,6 +186,8 @@ def parameters_for_operation(context) Aws::MediaLive::Endpoints::UpdateMultiplexProgram.build(context) when :update_reservation Aws::MediaLive::Endpoints::UpdateReservation.build(context) + when :restart_channel_pipelines + Aws::MediaLive::Endpoints::RestartChannelPipelines.build(context) end end end diff --git a/gems/aws-sdk-medialive/lib/aws-sdk-medialive/types.rb b/gems/aws-sdk-medialive/lib/aws-sdk-medialive/types.rb index ec8c0d7bfeb..dc8aaa8adf8 100644 --- a/gems/aws-sdk-medialive/lib/aws-sdk-medialive/types.rb +++ b/gems/aws-sdk-medialive/lib/aws-sdk-medialive/types.rb @@ -1459,7 +1459,8 @@ class CancelInputDeviceTransferResponse < Aws::EmptyStructure; end # @!attribute [rw] accessibility # Indicates whether the caption track implements accessibility # features such as written descriptions of spoken dialog, music, and - # sounds. + # sounds. This signaling is added to HLS output group and MediaPackage + # output group. # @return [String] # # @!attribute [rw] caption_selector_name @@ -2086,7 +2087,7 @@ class ConflictException < Struct.new( # @return [String] # # @!attribute [rw] request_id - # Unique request ID to be specified. This is needed to prevent retries from creating multiple resources. **A suitable default value is auto-generated.** You should normally + # Unique request ID to be specified. This is needed to prevent retries from creating multiple resources.**A suitable default value is auto-generated.** You should normally # not need to pass this option. # @return [String] # @@ -2247,7 +2248,7 @@ class CreateChannelResultModel < Struct.new( # @return [String] # # @!attribute [rw] request_id - # Unique identifier of the request to ensure the request is handled exactly once in case of retries. 
**A suitable default value is auto-generated.** You should normally + # Unique identifier of the request to ensure the request is handled exactly once in case of retries.**A suitable default value is auto-generated.** You should normally # not need to pass this option. # @return [String] # @@ -2430,7 +2431,7 @@ class CreateInputSecurityGroupResultModel < Struct.new( # @return [String] # # @!attribute [rw] request_id - # Unique request ID. This prevents retries from creating multiple resources. **A suitable default value is auto-generated.** You should normally + # Unique request ID. This prevents retries from creating multiple resources.**A suitable default value is auto-generated.** You should normally # not need to pass this option. # @return [String] # @@ -2459,7 +2460,7 @@ class CreateMultiplex < Struct.new( # @return [String] # # @!attribute [rw] request_id - # Unique request ID. This prevents retries from creating multiple resources. **A suitable default value is auto-generated.** You should normally + # Unique request ID. This prevents retries from creating multiple resources.**A suitable default value is auto-generated.** You should normally # not need to pass this option. # @return [String] # @@ -2578,7 +2579,7 @@ class CreateMultiplexResultModel < Struct.new( end # @!attribute [rw] request_id - # Unique identifier of the request to ensure the request is handled exactly once in case of retries. **A suitable default value is auto-generated.** You should normally + # Unique identifier of the request to ensure the request is handled exactly once in case of retries.**A suitable default value is auto-generated.** You should normally # not need to pass this option. # @return [String] # @@ -3238,6 +3239,8 @@ class DescribeInputDeviceResponse < Struct.new( # @return [String] # # @!attribute [rw] accept + # The HTTP Accept header. Indicates the requested type fothe + # thumbnail. 
# @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DescribeInputDeviceThumbnailRequest AWS API Documentation @@ -3250,9 +3253,12 @@ class DescribeInputDeviceThumbnailRequest < Struct.new( end # @!attribute [rw] body + # The binary data for the thumbnail that the Link device has most + # recently sent to MediaLive. # @return [IO] # # @!attribute [rw] content_type + # Specifies the media type of the thumbnail. # @return [String] # # @!attribute [rw] content_length @@ -3262,6 +3268,7 @@ class DescribeInputDeviceThumbnailRequest < Struct.new( # @return [String] # # @!attribute [rw] last_modified + # Placeholder documentation for \_\_timestamp # @return [Time] # # @see http://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DescribeInputDeviceThumbnailResponse AWS API Documentation @@ -13251,5 +13258,110 @@ class InputDeviceUhdAudioChannelPairConfig < Struct.new( include Aws::Structure end + # @!attribute [rw] channel_id + # @return [String] + # + # @!attribute [rw] pipeline_ids + # An array of pipelines to restart in this channel. Format PIPELINE\_0 + # or PIPELINE\_1. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/RestartChannelPipelinesRequest AWS API Documentation + # + class RestartChannelPipelinesRequest < Struct.new( + :channel_id, + :pipeline_ids) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] arn + # @return [String] + # + # @!attribute [rw] cdi_input_specification + # @return [Types::CdiInputSpecification] + # + # @!attribute [rw] channel_class + # A standard channel has two encoding pipelines and a single pipeline + # channel only has one. 
+ # @return [String] + # + # @!attribute [rw] destinations + # @return [Array] + # + # @!attribute [rw] egress_endpoints + # @return [Array] + # + # @!attribute [rw] encoder_settings + # Encoder Settings + # @return [Types::EncoderSettings] + # + # @!attribute [rw] id + # @return [String] + # + # @!attribute [rw] input_attachments + # @return [Array] + # + # @!attribute [rw] input_specification + # @return [Types::InputSpecification] + # + # @!attribute [rw] log_level + # The log level the user wants for their channel. + # @return [String] + # + # @!attribute [rw] maintenance + # @return [Types::MaintenanceStatus] + # + # @!attribute [rw] maintenance_status + # @return [String] + # + # @!attribute [rw] name + # @return [String] + # + # @!attribute [rw] pipeline_details + # @return [Array] + # + # @!attribute [rw] pipelines_running_count + # @return [Integer] + # + # @!attribute [rw] role_arn + # @return [String] + # + # @!attribute [rw] state + # @return [String] + # + # @!attribute [rw] tags + # @return [Hash] + # + # @!attribute [rw] vpc + # The properties for a private VPC Output + # @return [Types::VpcOutputSettingsDescription] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/RestartChannelPipelinesResponse AWS API Documentation + # + class RestartChannelPipelinesResponse < Struct.new( + :arn, + :cdi_input_specification, + :channel_class, + :destinations, + :egress_endpoints, + :encoder_settings, + :id, + :input_attachments, + :input_specification, + :log_level, + :maintenance, + :maintenance_status, + :name, + :pipeline_details, + :pipelines_running_count, + :role_arn, + :state, + :tags, + :vpc) + SENSITIVE = [] + include Aws::Structure + end + end end diff --git a/gems/aws-sdk-medialive/sig/client.rbs b/gems/aws-sdk-medialive/sig/client.rbs index e94c4bf5003..bba5dda9cc1 100644 --- a/gems/aws-sdk-medialive/sig/client.rbs +++ b/gems/aws-sdk-medialive/sig/client.rbs @@ -3372,6 +3372,35 @@ module Aws ) -> _UpdateReservationResponseSuccess 
| (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _UpdateReservationResponseSuccess + interface _RestartChannelPipelinesResponseSuccess + include ::Seahorse::Client::_ResponseSuccess[Types::RestartChannelPipelinesResponse] + def arn: () -> ::String + def cdi_input_specification: () -> Types::CdiInputSpecification + def channel_class: () -> ("STANDARD" | "SINGLE_PIPELINE") + def destinations: () -> ::Array[Types::OutputDestination] + def egress_endpoints: () -> ::Array[Types::ChannelEgressEndpoint] + def encoder_settings: () -> Types::EncoderSettings + def id: () -> ::String + def input_attachments: () -> ::Array[Types::InputAttachment] + def input_specification: () -> Types::InputSpecification + def log_level: () -> ("ERROR" | "WARNING" | "INFO" | "DEBUG" | "DISABLED") + def maintenance: () -> Types::MaintenanceStatus + def maintenance_status: () -> ::String + def name: () -> ::String + def pipeline_details: () -> ::Array[Types::PipelineDetail] + def pipelines_running_count: () -> ::Integer + def role_arn: () -> ::String + def state: () -> ("CREATING" | "CREATE_FAILED" | "IDLE" | "STARTING" | "RUNNING" | "RECOVERING" | "STOPPING" | "DELETING" | "DELETED" | "UPDATING" | "UPDATE_FAILED") + def tags: () -> ::Hash[::String, ::String] + def vpc: () -> Types::VpcOutputSettingsDescription + end + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/MediaLive/Client.html#restart_channel_pipelines-instance_method + def restart_channel_pipelines: ( + channel_id: ::String, + ?pipeline_ids: Array[("PIPELINE_0" | "PIPELINE_1")] + ) -> _RestartChannelPipelinesResponseSuccess + | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _RestartChannelPipelinesResponseSuccess + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/MediaLive/Client.html#wait_until-instance_method def wait_until: (:channel_created waiter_name, channel_id: ::String diff --git a/gems/aws-sdk-medialive/sig/types.rbs b/gems/aws-sdk-medialive/sig/types.rbs index 
3a858ae105e..e80ca065869 100644 --- a/gems/aws-sdk-medialive/sig/types.rbs +++ b/gems/aws-sdk-medialive/sig/types.rbs @@ -3462,5 +3462,34 @@ module Aws::MediaLive attr_accessor profile: ("DISABLED" | "VBR-AAC_HHE-16000" | "VBR-AAC_HE-64000" | "VBR-AAC_LC-128000" | "CBR-AAC_HQ-192000" | "CBR-AAC_HQ-256000" | "CBR-AAC_HQ-384000" | "CBR-AAC_HQ-512000") SENSITIVE: [] end + + class RestartChannelPipelinesRequest + attr_accessor channel_id: ::String + attr_accessor pipeline_ids: ::Array[("PIPELINE_0" | "PIPELINE_1")] + SENSITIVE: [] + end + + class RestartChannelPipelinesResponse + attr_accessor arn: ::String + attr_accessor cdi_input_specification: Types::CdiInputSpecification + attr_accessor channel_class: ("STANDARD" | "SINGLE_PIPELINE") + attr_accessor destinations: ::Array[Types::OutputDestination] + attr_accessor egress_endpoints: ::Array[Types::ChannelEgressEndpoint] + attr_accessor encoder_settings: Types::EncoderSettings + attr_accessor id: ::String + attr_accessor input_attachments: ::Array[Types::InputAttachment] + attr_accessor input_specification: Types::InputSpecification + attr_accessor log_level: ("ERROR" | "WARNING" | "INFO" | "DEBUG" | "DISABLED") + attr_accessor maintenance: Types::MaintenanceStatus + attr_accessor maintenance_status: ::String + attr_accessor name: ::String + attr_accessor pipeline_details: ::Array[Types::PipelineDetail] + attr_accessor pipelines_running_count: ::Integer + attr_accessor role_arn: ::String + attr_accessor state: ("CREATING" | "CREATE_FAILED" | "IDLE" | "STARTING" | "RUNNING" | "RECOVERING" | "STOPPING" | "DELETING" | "DELETED" | "UPDATING" | "UPDATE_FAILED") + attr_accessor tags: ::Hash[::String, ::String] + attr_accessor vpc: Types::VpcOutputSettingsDescription + SENSITIVE: [] + end end end diff --git a/gems/aws-sdk-ssm/CHANGELOG.md b/gems/aws-sdk-ssm/CHANGELOG.md index 7f414caff63..2fe046a8c81 100644 --- a/gems/aws-sdk-ssm/CHANGELOG.md +++ b/gems/aws-sdk-ssm/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes 
------------------ +1.165.0 (2024-02-21) +------------------ + +* Feature - This release adds support for sharing Systems Manager parameters with other AWS accounts. + 1.164.0 (2024-01-31) ------------------ diff --git a/gems/aws-sdk-ssm/VERSION b/gems/aws-sdk-ssm/VERSION index dc6d195e8fe..be881cd1408 100644 --- a/gems/aws-sdk-ssm/VERSION +++ b/gems/aws-sdk-ssm/VERSION @@ -1 +1 @@ -1.164.0 +1.165.0 diff --git a/gems/aws-sdk-ssm/lib/aws-sdk-ssm.rb b/gems/aws-sdk-ssm/lib/aws-sdk-ssm.rb index 30648bd169e..dc25b887a25 100644 --- a/gems/aws-sdk-ssm/lib/aws-sdk-ssm.rb +++ b/gems/aws-sdk-ssm/lib/aws-sdk-ssm.rb @@ -53,6 +53,6 @@ # @!group service module Aws::SSM - GEM_VERSION = '1.164.0' + GEM_VERSION = '1.165.0' end diff --git a/gems/aws-sdk-ssm/lib/aws-sdk-ssm/client.rb b/gems/aws-sdk-ssm/lib/aws-sdk-ssm/client.rb index 818be88fc45..d0634bd5d0f 100644 --- a/gems/aws-sdk-ssm/lib/aws-sdk-ssm/client.rb +++ b/gems/aws-sdk-ssm/lib/aws-sdk-ssm/client.rb @@ -2501,6 +2501,11 @@ def delete_ops_metadata(params = {}, options = {}) # @option params [required, String] :name # The name of the parameter to delete. # + # You can't enter the Amazon Resource Name (ARN) for a parameter, only + # the parameter name itself. + # + # + # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. # # @example Request syntax with placeholder values @@ -2525,6 +2530,11 @@ def delete_parameter(params = {}, options = {}) # The names of the parameters to delete. After deleting a parameter, # wait for at least 30 seconds to create a parameter with the same name. # + # You can't enter the Amazon Resource Name (ARN) for a parameter, only + # the parameter name itself. 
+ # + # + # # @return [Types::DeleteParametersResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::DeleteParametersResult#deleted_parameters #deleted_parameters} => Array<String> @@ -2610,11 +2620,19 @@ def delete_resource_data_sync(params = {}, options = {}) # Deletes a Systems Manager resource policy. A resource policy helps you # to define the IAM entity (for example, an Amazon Web Services account) - # that can manage your Systems Manager resources. Currently, - # `OpsItemGroup` is the only resource that supports Systems Manager - # resource policies. The resource policy for `OpsItemGroup` enables - # Amazon Web Services accounts to view and interact with OpsCenter - # operational work items (OpsItems). + # that can manage your Systems Manager resources. The following + # resources support Systems Manager resource policies. + # + # * `OpsItemGroup` - The resource policy for `OpsItemGroup` enables + # Amazon Web Services accounts to view and interact with OpsCenter + # operational work items (OpsItems). + # + # * `Parameter` - The resource policy is used to share a parameter with + # other accounts using Resource Access Manager (RAM). For more + # information about cross-account sharing of parameters, see [Working + # with shared + # parameters](systems-manager/latest/userguide/parameter-store-shared-parameters.html) + # in the *Amazon Web Services Systems Manager User Guide*. # # @option params [required, String] :resource_arn # Amazon Resource Name (ARN) of the resource to which the policies are @@ -4907,7 +4925,8 @@ def describe_ops_items(params = {}, options = {}) req.send_request(options) end - # Get information about a parameter. + # Lists the parameters in your Amazon Web Services account or the + # parameters shared with you when you enable the [Shared][1] option. # # Request results are returned on a best-effort basis. 
If you specify # `MaxResults` in the request, the response includes information up to @@ -4923,6 +4942,10 @@ def describe_ops_items(params = {}, options = {}) # to reference KMS. Otherwise, `DescribeParameters` retrieves whatever # the original key alias was referencing. # + # + # + # [1]: https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_DescribeParameters.html#systemsmanager-DescribeParameters-request-Shared + # # @option params [Array] :filters # This data type is deprecated. Instead, use `ParameterFilters`. # @@ -4938,6 +4961,28 @@ def describe_ops_items(params = {}, options = {}) # The token for the next set of items to return. (You received this # token from a previous call.) # + # @option params [Boolean] :shared + # Lists parameters that are shared with you. + # + # By default when using this option, the command returns parameters that + # have been shared using a standard Resource Access Manager Resource + # Share. In order for a parameter that was shared using the + # PutResourcePolicy command to be returned, the associated `RAM Resource + # Share Created From Policy` must have been promoted to a standard + # Resource Share using the RAM + # [PromoteResourceShareCreatedFromPolicy][1] API operation. + # + # For more information about sharing parameters, see [Working with + # shared + # parameters](systems-manager/latest/userguide/parameter-store-shared-parameters.html) + # in the *Amazon Web Services Systems Manager User Guide*. 
+ # + # + # + # + # + # [1]: https://docs.aws.amazon.com/ram/latest/APIReference/API_PromoteResourceShareCreatedFromPolicy.html + # # @return [Types::DescribeParametersResult] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::DescribeParametersResult#parameters #parameters} => Array<Types::ParameterMetadata> @@ -4963,12 +5008,14 @@ def describe_ops_items(params = {}, options = {}) # ], # max_results: 1, # next_token: "NextToken", + # shared: false, # }) # # @example Response structure # # resp.parameters #=> Array # resp.parameters[0].name #=> String + # resp.parameters[0].arn #=> String # resp.parameters[0].type #=> String, one of "String", "StringList", "SecureString" # resp.parameters[0].key_id #=> String # resp.parameters[0].last_modified_date #=> Time @@ -6660,11 +6707,21 @@ def get_ops_summary(params = {}, options = {}) # # # @option params [required, String] :name - # The name of the parameter you want to query. + # The name or Amazon Resource Name (ARN) of the parameter that you want + # to query. For parameters shared with you from another account, you + # must use the full ARN. # # To query by parameter label, use `"Name": "name:label"`. To query by # parameter version, use `"Name": "name:version"`. # + # For more information about shared parameters, see [Working with shared + # parameters][1] in the *Amazon Web Services Systems Manager User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/systems-manager/latest/userguide/sharing.html + # # @option params [Boolean] :with_decryption # Return decrypted values for secure string parameters. This flag is # ignored for `String` and `StringList` parameter types. @@ -6709,7 +6766,9 @@ def get_parameter(params = {}, options = {}) # the original key alias was referencing. # # @option params [required, String] :name - # The name of the parameter for which you want to review history. 
+ # The name or Amazon Resource Name (ARN) of the parameter for which you + # want to review history. For parameters shared with you from another + # account, you must use the full ARN. # # @option params [Boolean] :with_decryption # Return decrypted values for secure string parameters. This flag is @@ -6780,11 +6839,21 @@ def get_parameter_history(params = {}, options = {}) # # # @option params [required, Array] :names - # Names of the parameters for which you want to query information. + # The names or Amazon Resource Names (ARNs) of the parameters that you + # want to query. For parameters shared with you from another account, + # you must use the full ARNs. # # To query by parameter label, use `"Name": "name:label"`. To query by # parameter version, use `"Name": "name:version"`. # + # For more information about shared parameters, see [Working with shared + # parameters][1] in the *Amazon Web Services Systems Manager User + # Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/systems-manager/latest/userguide/sharing.html + # # @option params [Boolean] :with_decryption # Return decrypted secure string value. Return decrypted values for # secure string parameters. This flag is ignored for `String` and @@ -7197,6 +7266,11 @@ def get_service_setting(params = {}, options = {}) # @option params [required, String] :name # The parameter name on which you want to attach one or more labels. # + # You can't enter the Amazon Resource Name (ARN) for a parameter, only + # the parameter name itself. + # + # + # # @option params [Integer] :parameter_version # The specific version of the parameter on which you want to attach one # or more labels. If no version is specified, the system attaches the @@ -8625,8 +8699,15 @@ def put_inventory(params = {}, options = {}) # # @option params [required, String] :name # The fully qualified name of the parameter that you want to add to the - # system. 
The fully qualified name includes the complete hierarchy of - # the parameter path and name. For parameters in a hierarchy, you must + # system. + # + # You can't enter the Amazon Resource Name (ARN) for a parameter, only + # the parameter name itself. + # + # + # + # The fully qualified name includes the complete hierarchy of the + # parameter path and name. For parameters in a hierarchy, you must # include a leading forward slash character (/) when you create or # reference a parameter. For example: `/Dev/DBServer/MySQL/db-string13` # @@ -8929,11 +9010,50 @@ def put_parameter(params = {}, options = {}) # Creates or updates a Systems Manager resource policy. A resource # policy helps you to define the IAM entity (for example, an Amazon Web - # Services account) that can manage your Systems Manager resources. - # Currently, `OpsItemGroup` is the only resource that supports Systems - # Manager resource policies. The resource policy for `OpsItemGroup` - # enables Amazon Web Services accounts to view and interact with - # OpsCenter operational work items (OpsItems). + # Services account) that can manage your Systems Manager resources. The + # following resources support Systems Manager resource policies. + # + # * `OpsItemGroup` - The resource policy for `OpsItemGroup` enables + # Amazon Web Services accounts to view and interact with OpsCenter + # operational work items (OpsItems). + # + # * `Parameter` - The resource policy is used to share a parameter with + # other accounts using Resource Access Manager (RAM). + # + # To share a parameter, it must be in the advanced parameter tier. For + # information about parameter tiers, see [Managing parameter + # tiers][1]. For information about changing an existing standard + # parameter to an advanced parameter, see [Changing a standard + # parameter to an advanced parameter][2]. 
+ # + # To share a `SecureString` parameter, it must be encrypted with a + # customer managed key, and you must share the key separately through + # Key Management Service. Amazon Web Services managed keys cannot be + # shared. Parameters encrypted with the default Amazon Web Services + # managed key can be updated to use a customer managed key instead. + # For KMS key definitions, see [KMS concepts][3] in the *Key + # Management Service Developer Guide*. + # + # While you can share a parameter using the Systems Manager + # `PutResourcePolicy` operation, we recommend using Resource Access + # Manager (RAM) instead. This is because using `PutResourcePolicy` + # requires the extra step of promoting the parameter to a standard RAM + # Resource Share using the RAM + # [PromoteResourceShareCreatedFromPolicy][4] API operation. Otherwise, + # the parameter won't be returned by the Systems Manager + # [DescribeParameters][5] API operation using the `--shared` option. + # + # For more information, see [Sharing a parameter][6] in the *Amazon + # Web Services Systems Manager User Guide* + # + # + # + # [1]: https://docs.aws.amazon.com/parameter-store- advanced-parameters.html + # [2]: https://docs.aws.amazon.com/parameter-store-advanced-parameters.html#parameter- store-advanced-parameters-enabling + # [3]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-mgmt + # [4]: https://docs.aws.amazon.com/ram/latest/APIReference/API_PromoteResourceShareCreatedFromPolicy.html + # [5]: https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_DescribeParameters.html + # [6]: https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-shared-parameters.html#share # # @option params [required, String] :resource_arn # Amazon Resource Name (ARN) of the resource to which you want to attach @@ -10394,6 +10514,11 @@ def terminate_session(params = {}, options = {}) # The name of the parameter from which you want to delete one or more # labels. 
# + # You can't enter the Amazon Resource Name (ARN) for a parameter, only + # the parameter name itself. + # + # + # # @option params [required, Integer] :parameter_version # The specific version of the parameter which you want to delete one or # more labels from. If it isn't present, the call will fail. @@ -12308,7 +12433,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-ssm' - context[:gem_version] = '1.164.0' + context[:gem_version] = '1.165.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-ssm/lib/aws-sdk-ssm/client_api.rb b/gems/aws-sdk-ssm/lib/aws-sdk-ssm/client_api.rb index c155032fec8..4f5a1604d5f 100644 --- a/gems/aws-sdk-ssm/lib/aws-sdk-ssm/client_api.rb +++ b/gems/aws-sdk-ssm/lib/aws-sdk-ssm/client_api.rb @@ -703,6 +703,7 @@ module ClientApi MaintenanceWindowTaskType = Shapes::StringShape.new(name: 'MaintenanceWindowTaskType') MaintenanceWindowTimezone = Shapes::StringShape.new(name: 'MaintenanceWindowTimezone') MaintenanceWindowsForTargetList = Shapes::ListShape.new(name: 'MaintenanceWindowsForTargetList') + MalformedResourcePolicyDocumentException = Shapes::StructureShape.new(name: 'MalformedResourcePolicyDocumentException') ManagedInstanceId = Shapes::StringShape.new(name: 'ManagedInstanceId') MaxConcurrency = Shapes::StringShape.new(name: 'MaxConcurrency') MaxDocumentSizeExceeded = Shapes::StructureShape.new(name: 'MaxDocumentSizeExceeded') @@ -1040,10 +1041,12 @@ module ClientApi ResourceId = Shapes::StringShape.new(name: 'ResourceId') ResourceInUseException = Shapes::StructureShape.new(name: 'ResourceInUseException') ResourceLimitExceededException = Shapes::StructureShape.new(name: 'ResourceLimitExceededException') + ResourceNotFoundException = Shapes::StructureShape.new(name: 'ResourceNotFoundException') ResourcePolicyConflictException = Shapes::StructureShape.new(name: 'ResourcePolicyConflictException') ResourcePolicyInvalidParameterException = 
Shapes::StructureShape.new(name: 'ResourcePolicyInvalidParameterException') ResourcePolicyLimitExceededException = Shapes::StructureShape.new(name: 'ResourcePolicyLimitExceededException') ResourcePolicyMaxResults = Shapes::IntegerShape.new(name: 'ResourcePolicyMaxResults') + ResourcePolicyNotFoundException = Shapes::StructureShape.new(name: 'ResourcePolicyNotFoundException') ResourcePolicyParameterNamesList = Shapes::ListShape.new(name: 'ResourcePolicyParameterNamesList') ResourceType = Shapes::StringShape.new(name: 'ResourceType') ResourceTypeForTagging = Shapes::StringShape.new(name: 'ResourceTypeForTagging') @@ -2233,6 +2236,7 @@ module ClientApi DescribeParametersRequest.add_member(:parameter_filters, Shapes::ShapeRef.new(shape: ParameterStringFilterList, location_name: "ParameterFilters")) DescribeParametersRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location_name: "MaxResults", metadata: {"box"=>true})) DescribeParametersRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextToken")) + DescribeParametersRequest.add_member(:shared, Shapes::ShapeRef.new(shape: Boolean, location_name: "Shared", metadata: {"box"=>true})) DescribeParametersRequest.struct_class = Types::DescribeParametersRequest DescribeParametersResult.add_member(:parameters, Shapes::ShapeRef.new(shape: ParameterMetadataList, location_name: "Parameters")) @@ -3480,6 +3484,9 @@ module ClientApi MaintenanceWindowsForTargetList.member = Shapes::ShapeRef.new(shape: MaintenanceWindowIdentityForTarget) + MalformedResourcePolicyDocumentException.add_member(:message, Shapes::ShapeRef.new(shape: String, location_name: "Message")) + MalformedResourcePolicyDocumentException.struct_class = Types::MalformedResourcePolicyDocumentException + MaxDocumentSizeExceeded.add_member(:message, Shapes::ShapeRef.new(shape: String, location_name: "Message")) MaxDocumentSizeExceeded.struct_class = Types::MaxDocumentSizeExceeded @@ -3791,6 +3798,7 @@ module 
ClientApi ParameterMaxVersionLimitExceeded.struct_class = Types::ParameterMaxVersionLimitExceeded ParameterMetadata.add_member(:name, Shapes::ShapeRef.new(shape: PSParameterName, location_name: "Name")) + ParameterMetadata.add_member(:arn, Shapes::ShapeRef.new(shape: String, location_name: "ARN")) ParameterMetadata.add_member(:type, Shapes::ShapeRef.new(shape: ParameterType, location_name: "Type")) ParameterMetadata.add_member(:key_id, Shapes::ShapeRef.new(shape: ParameterKeyId, location_name: "KeyId")) ParameterMetadata.add_member(:last_modified_date, Shapes::ShapeRef.new(shape: DateTime, location_name: "LastModifiedDate")) @@ -4183,6 +4191,9 @@ module ClientApi ResourceLimitExceededException.add_member(:message, Shapes::ShapeRef.new(shape: String, location_name: "Message")) ResourceLimitExceededException.struct_class = Types::ResourceLimitExceededException + ResourceNotFoundException.add_member(:message, Shapes::ShapeRef.new(shape: String, location_name: "Message")) + ResourceNotFoundException.struct_class = Types::ResourceNotFoundException + ResourcePolicyConflictException.add_member(:message, Shapes::ShapeRef.new(shape: String, location_name: "Message")) ResourcePolicyConflictException.struct_class = Types::ResourcePolicyConflictException @@ -4195,6 +4206,9 @@ module ClientApi ResourcePolicyLimitExceededException.add_member(:message, Shapes::ShapeRef.new(shape: String, location_name: "Message")) ResourcePolicyLimitExceededException.struct_class = Types::ResourcePolicyLimitExceededException + ResourcePolicyNotFoundException.add_member(:message, Shapes::ShapeRef.new(shape: String, location_name: "Message")) + ResourcePolicyNotFoundException.struct_class = Types::ResourcePolicyNotFoundException + ResourcePolicyParameterNamesList.member = Shapes::ShapeRef.new(shape: String) ResultAttribute.add_member(:type_name, Shapes::ShapeRef.new(shape: InventoryItemTypeName, required: true, location_name: "TypeName")) @@ -5050,6 +5064,9 @@ module ClientApi o.errors << 
Shapes::ShapeRef.new(shape: InternalServerError) o.errors << Shapes::ShapeRef.new(shape: ResourcePolicyInvalidParameterException) o.errors << Shapes::ShapeRef.new(shape: ResourcePolicyConflictException) + o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) + o.errors << Shapes::ShapeRef.new(shape: MalformedResourcePolicyDocumentException) + o.errors << Shapes::ShapeRef.new(shape: ResourcePolicyNotFoundException) end) api.add_operation(:deregister_managed_instance, Seahorse::Model::Operation.new.tap do |o| @@ -5910,6 +5927,7 @@ module ClientApi o.output = Shapes::ShapeRef.new(shape: GetResourcePoliciesResponse) o.errors << Shapes::ShapeRef.new(shape: InternalServerError) o.errors << Shapes::ShapeRef.new(shape: ResourcePolicyInvalidParameterException) + o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) o[:pager] = Aws::Pager.new( limit_key: "max_results", tokens: { @@ -6283,6 +6301,9 @@ module ClientApi o.errors << Shapes::ShapeRef.new(shape: ResourcePolicyInvalidParameterException) o.errors << Shapes::ShapeRef.new(shape: ResourcePolicyLimitExceededException) o.errors << Shapes::ShapeRef.new(shape: ResourcePolicyConflictException) + o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) + o.errors << Shapes::ShapeRef.new(shape: MalformedResourcePolicyDocumentException) + o.errors << Shapes::ShapeRef.new(shape: ResourcePolicyNotFoundException) end) api.add_operation(:register_default_patch_baseline, Seahorse::Model::Operation.new.tap do |o| diff --git a/gems/aws-sdk-ssm/lib/aws-sdk-ssm/errors.rb b/gems/aws-sdk-ssm/lib/aws-sdk-ssm/errors.rb index 9d69f35a5c7..ad7b9715869 100644 --- a/gems/aws-sdk-ssm/lib/aws-sdk-ssm/errors.rb +++ b/gems/aws-sdk-ssm/lib/aws-sdk-ssm/errors.rb @@ -108,6 +108,7 @@ module Aws::SSM # * {InvocationDoesNotExist} # * {ItemContentMismatchException} # * {ItemSizeLimitExceededException} + # * {MalformedResourcePolicyDocumentException} # * {MaxDocumentSizeExceeded} # * {OpsItemAccessDeniedException} # * 
{OpsItemAlreadyExistsException} @@ -138,9 +139,11 @@ module Aws::SSM # * {ResourceDataSyncNotFoundException} # * {ResourceInUseException} # * {ResourceLimitExceededException} + # * {ResourceNotFoundException} # * {ResourcePolicyConflictException} # * {ResourcePolicyInvalidParameterException} # * {ResourcePolicyLimitExceededException} + # * {ResourcePolicyNotFoundException} # * {ServiceSettingNotFound} # * {StatusUnchanged} # * {SubTypeCountLimitExceededException} @@ -1333,6 +1336,21 @@ def message end end + class MalformedResourcePolicyDocumentException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::SSM::Types::MalformedResourcePolicyDocumentException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + class MaxDocumentSizeExceeded < ServiceError # @param [Seahorse::Client::RequestContext] context @@ -1828,6 +1846,21 @@ def message end end + class ResourceNotFoundException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::SSM::Types::ResourceNotFoundException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + class ResourcePolicyConflictException < ServiceError # @param [Seahorse::Client::RequestContext] context @@ -1888,6 +1921,21 @@ def message end end + class ResourcePolicyNotFoundException < ServiceError + + # @param [Seahorse::Client::RequestContext] context + # @param [String] message + # @param [Aws::SSM::Types::ResourcePolicyNotFoundException] data + def initialize(context, message, data = Aws::EmptyStructure.new) + super(context, message, data) + end + + # @return [String] + def message + @message || @data[:message] + end + end + class 
ServiceSettingNotFound < ServiceError # @param [Seahorse::Client::RequestContext] context diff --git a/gems/aws-sdk-ssm/lib/aws-sdk-ssm/types.rb b/gems/aws-sdk-ssm/lib/aws-sdk-ssm/types.rb index 764f4f6e975..74550a39a53 100644 --- a/gems/aws-sdk-ssm/lib/aws-sdk-ssm/types.rb +++ b/gems/aws-sdk-ssm/lib/aws-sdk-ssm/types.rb @@ -4231,6 +4231,11 @@ class DeleteOpsMetadataResult < Aws::EmptyStructure; end # @!attribute [rw] name # The name of the parameter to delete. + # + # You can't enter the Amazon Resource Name (ARN) for a parameter, + # only the parameter name itself. + # + # # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeleteParameterRequest AWS API Documentation @@ -4249,6 +4254,11 @@ class DeleteParameterResult < Aws::EmptyStructure; end # The names of the parameters to delete. After deleting a parameter, # wait for at least 30 seconds to create a parameter with the same # name. + # + # You can't enter the Amazon Resource Name (ARN) for a parameter, + # only the parameter name itself. + # + # # @return [Array] # # @see http://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeleteParametersRequest AWS API Documentation @@ -5992,13 +6002,37 @@ class DescribeOpsItemsResponse < Struct.new( # token from a previous call.) # @return [String] # + # @!attribute [rw] shared + # Lists parameters that are shared with you. + # + # By default when using this option, the command returns parameters + # that have been shared using a standard Resource Access Manager + # Resource Share. In order for a parameter that was shared using the + # PutResourcePolicy command to be returned, the associated `RAM + # Resource Share Created From Policy` must have been promoted to a + # standard Resource Share using the RAM + # [PromoteResourceShareCreatedFromPolicy][1] API operation. 
+ # + # For more information about sharing parameters, see [Working with + # shared + # parameters](systems-manager/latest/userguide/parameter-store-shared-parameters.html) + # in the *Amazon Web Services Systems Manager User Guide*. + # + # + # + # + # + # [1]: https://docs.aws.amazon.com/ram/latest/APIReference/API_PromoteResourceShareCreatedFromPolicy.html + # @return [Boolean] + # # @see http://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeParametersRequest AWS API Documentation # class DescribeParametersRequest < Struct.new( :filters, :parameter_filters, :max_results, - :next_token) + :next_token, + :shared) SENSITIVE = [] include Aws::Structure end @@ -8566,7 +8600,9 @@ class GetOpsSummaryResult < Struct.new( end # @!attribute [rw] name - # The name of the parameter for which you want to review history. + # The name or Amazon Resource Name (ARN) of the parameter for which + # you want to review history. For parameters shared with you from + # another account, you must use the full ARN. # @return [String] # # @!attribute [rw] with_decryption @@ -8615,10 +8651,20 @@ class GetParameterHistoryResult < Struct.new( end # @!attribute [rw] name - # The name of the parameter you want to query. + # The name or Amazon Resource Name (ARN) of the parameter that you + # want to query. For parameters shared with you from another account, + # you must use the full ARN. # # To query by parameter label, use `"Name": "name:label"`. To query by # parameter version, use `"Name": "name:version"`. + # + # For more information about shared parameters, see [Working with + # shared parameters][1] in the *Amazon Web Services Systems Manager + # User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/systems-manager/latest/userguide/sharing.html # @return [String] # # @!attribute [rw] with_decryption @@ -8727,10 +8773,20 @@ class GetParametersByPathResult < Struct.new( end # @!attribute [rw] names - # Names of the parameters for which you want to query information. 
+ # The names or Amazon Resource Names (ARNs) of the parameters that you + # want to query. For parameters shared with you from another account, + # you must use the full ARNs. # # To query by parameter label, use `"Name": "name:label"`. To query by # parameter version, use `"Name": "name:version"`. + # + # For more information about shared parameters, see [Working with + # shared parameters][1] in the *Amazon Web Services Systems Manager + # User Guide*. + # + # + # + # [1]: https://docs.aws.amazon.com/systems-manager/latest/userguide/sharing.html # @return [Array] # # @!attribute [rw] with_decryption @@ -10807,6 +10863,11 @@ class ItemSizeLimitExceededException < Struct.new( # @!attribute [rw] name # The parameter name on which you want to attach one or more labels. + # + # You can't enter the Amazon Resource Name (ARN) for a parameter, + # only the parameter name itself. + # + # # @return [String] # # @!attribute [rw] parameter_version @@ -12519,6 +12580,20 @@ class MaintenanceWindowTaskParameterValueExpression < Struct.new( include Aws::Structure end + # The specified policy document is malformed or invalid, or excessive + # `PutResourcePolicy` or `DeleteResourcePolicy` calls have been made. + # + # @!attribute [rw] message + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/MalformedResourcePolicyDocumentException AWS API Documentation + # + class MalformedResourcePolicyDocumentException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + # The size limit of a document is 64 KB. # # @!attribute [rw] message @@ -13882,13 +13957,18 @@ class ParameterMaxVersionLimitExceeded < Struct.new( include Aws::Structure end - # Metadata includes information like the ARN of the last user and the - # date/time the parameter was last used. + # Metadata includes information like the Amazon Resource Name (ARN) of + # the last user to update the parameter and the date and time the + # parameter was last used. 
# # @!attribute [rw] name # The parameter name. # @return [String] # + # @!attribute [rw] arn + # The (ARN) of the last user to update the parameter. + # @return [String] + # # @!attribute [rw] type # The type of parameter. Valid parameter types include the following: # `String`, `StringList`, and `SecureString`. @@ -13939,6 +14019,7 @@ class ParameterMaxVersionLimitExceeded < Struct.new( # class ParameterMetadata < Struct.new( :name, + :arn, :type, :key_id, :last_modified_date, @@ -14760,10 +14841,17 @@ class PutInventoryResult < Struct.new( # @!attribute [rw] name # The fully qualified name of the parameter that you want to add to - # the system. The fully qualified name includes the complete hierarchy - # of the parameter path and name. For parameters in a hierarchy, you - # must include a leading forward slash character (/) when you create - # or reference a parameter. For example: + # the system. + # + # You can't enter the Amazon Resource Name (ARN) for a parameter, + # only the parameter name itself. + # + # + # + # The fully qualified name includes the complete hierarchy of the + # parameter path and name. For parameters in a hierarchy, you must + # include a leading forward slash character (/) when you create or + # reference a parameter. For example: # `/Dev/DBServer/MySQL/db-string13` # # Naming Constraints: @@ -16131,6 +16219,19 @@ class ResourceLimitExceededException < Struct.new( include Aws::Structure end + # The specified parameter to be shared could not be found. + # + # @!attribute [rw] message + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ResourceNotFoundException AWS API Documentation + # + class ResourceNotFoundException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + # The hash provided in the call doesn't match the stored hash. This # exception is thrown when trying to update an obsolete policy version # or when multiple requests to update a policy are sent. 
@@ -16187,6 +16288,19 @@ class ResourcePolicyLimitExceededException < Struct.new( include Aws::Structure end + # No policies with the specified policy ID and hash could be found. + # + # @!attribute [rw] message + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ResourcePolicyNotFoundException AWS API Documentation + # + class ResourcePolicyNotFoundException < Struct.new( + :message) + SENSITIVE = [] + include Aws::Structure + end + # The inventory item result attribute. # # @!attribute [rw] type_name @@ -17774,6 +17888,11 @@ class TotalSizeLimitExceededException < Struct.new( # @!attribute [rw] name # The name of the parameter from which you want to delete one or more # labels. + # + # You can't enter the Amazon Resource Name (ARN) for a parameter, + # only the parameter name itself. + # + # # @return [String] # # @!attribute [rw] parameter_version diff --git a/gems/aws-sdk-ssm/sig/client.rbs b/gems/aws-sdk-ssm/sig/client.rbs index a7d41127caa..acee12a4fa0 100644 --- a/gems/aws-sdk-ssm/sig/client.rbs +++ b/gems/aws-sdk-ssm/sig/client.rbs @@ -1158,7 +1158,8 @@ module Aws }, ], ?max_results: ::Integer, - ?next_token: ::String + ?next_token: ::String, + ?shared: bool ) -> _DescribeParametersResponseSuccess | (?Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _DescribeParametersResponseSuccess diff --git a/gems/aws-sdk-ssm/sig/errors.rbs b/gems/aws-sdk-ssm/sig/errors.rbs index 73c6a7d772f..9cce7415b40 100644 --- a/gems/aws-sdk-ssm/sig/errors.rbs +++ b/gems/aws-sdk-ssm/sig/errors.rbs @@ -245,6 +245,9 @@ module Aws def type_name: () -> ::String def message: () -> ::String end + class MalformedResourcePolicyDocumentException < ::Aws::Errors::ServiceError + def message: () -> ::String + end class MaxDocumentSizeExceeded < ::Aws::Errors::ServiceError def message: () -> ::String end @@ -344,6 +347,9 @@ module Aws class ResourceLimitExceededException < ::Aws::Errors::ServiceError def message: () -> ::String end + class 
ResourceNotFoundException < ::Aws::Errors::ServiceError + def message: () -> ::String + end class ResourcePolicyConflictException < ::Aws::Errors::ServiceError def message: () -> ::String end @@ -356,6 +362,9 @@ module Aws def limit_type: () -> ::String def message: () -> ::String end + class ResourcePolicyNotFoundException < ::Aws::Errors::ServiceError + def message: () -> ::String + end class ServiceSettingNotFound < ::Aws::Errors::ServiceError def message: () -> ::String end diff --git a/gems/aws-sdk-ssm/sig/types.rbs b/gems/aws-sdk-ssm/sig/types.rbs index 6f5566632a4..030bb6be7a3 100644 --- a/gems/aws-sdk-ssm/sig/types.rbs +++ b/gems/aws-sdk-ssm/sig/types.rbs @@ -1254,6 +1254,7 @@ module Aws::SSM attr_accessor parameter_filters: ::Array[Types::ParameterStringFilter] attr_accessor max_results: ::Integer attr_accessor next_token: ::String + attr_accessor shared: bool SENSITIVE: [] end @@ -2813,6 +2814,11 @@ module Aws::SSM SENSITIVE: [:values] end + class MalformedResourcePolicyDocumentException + attr_accessor message: ::String + SENSITIVE: [] + end + class MaxDocumentSizeExceeded attr_accessor message: ::String SENSITIVE: [] @@ -3142,6 +3148,7 @@ module Aws::SSM class ParameterMetadata attr_accessor name: ::String + attr_accessor arn: ::String attr_accessor type: ("String" | "StringList" | "SecureString") attr_accessor key_id: ::String attr_accessor last_modified_date: ::Time @@ -3577,6 +3584,11 @@ module Aws::SSM SENSITIVE: [] end + class ResourceNotFoundException + attr_accessor message: ::String + SENSITIVE: [] + end + class ResourcePolicyConflictException attr_accessor message: ::String SENSITIVE: [] @@ -3595,6 +3607,11 @@ module Aws::SSM SENSITIVE: [] end + class ResourcePolicyNotFoundException + attr_accessor message: ::String + SENSITIVE: [] + end + class ResultAttribute attr_accessor type_name: ::String SENSITIVE: [] From 7128c6475e0fba8a008a5eac9284f93c6ffc6936 Mon Sep 17 00:00:00 2001 From: AWS SDK For Ruby Date: Thu, 22 Feb 2024 19:25:25 +0000 
Subject: [PATCH 4/8] Updated API models and rebuilt service gems. --- apis/internetmonitor/2021-06-03/api-2.json | 7 +++++- apis/internetmonitor/2021-06-03/docs-2.json | 11 ++++++++-- apis/kinesisvideo/2017-09-30/api-2.json | 2 +- gems/aws-sdk-internetmonitor/CHANGELOG.md | 5 +++++ gems/aws-sdk-internetmonitor/VERSION | 2 +- .../lib/aws-sdk-internetmonitor.rb | 2 +- .../lib/aws-sdk-internetmonitor/client.rb | 18 +++++++++++---- .../lib/aws-sdk-internetmonitor/client_api.rb | 4 ++++ .../lib/aws-sdk-internetmonitor/types.rb | 22 ++++++++++++++----- gems/aws-sdk-internetmonitor/sig/types.rbs | 1 + gems/aws-sdk-kinesisvideo/CHANGELOG.md | 5 +++++ gems/aws-sdk-kinesisvideo/VERSION | 2 +- .../lib/aws-sdk-kinesisvideo.rb | 2 +- .../lib/aws-sdk-kinesisvideo/client.rb | 2 +- 14 files changed, 67 insertions(+), 18 deletions(-) diff --git a/apis/internetmonitor/2021-06-03/api-2.json b/apis/internetmonitor/2021-06-03/api-2.json index 5273bc9bb07..1abea8b4f79 100644 --- a/apis/internetmonitor/2021-06-03/api-2.json +++ b/apis/internetmonitor/2021-06-03/api-2.json @@ -595,7 +595,8 @@ "ServiceLocation":{"shape":"String"}, "Status":{"shape":"HealthEventStatus"}, "CausedBy":{"shape":"NetworkImpairment"}, - "InternetHealth":{"shape":"InternetHealth"} + "InternetHealth":{"shape":"InternetHealth"}, + "Ipv4Prefixes":{"shape":"Ipv4PrefixList"} } }, "ImpactedLocationsList":{ @@ -635,6 +636,10 @@ "S3Config":{"shape":"S3Config"} } }, + "Ipv4PrefixList":{ + "type":"list", + "member":{"shape":"String"} + }, "LimitExceededException":{ "type":"structure", "members":{ diff --git a/apis/internetmonitor/2021-06-03/docs-2.json b/apis/internetmonitor/2021-06-03/docs-2.json index 9bc50cd17d3..e32df52f0da 100644 --- a/apis/internetmonitor/2021-06-03/docs-2.json +++ b/apis/internetmonitor/2021-06-03/docs-2.json @@ -224,6 +224,12 @@ "UpdateMonitorInput$InternetMeasurementsLogDelivery": "

Publish internet measurements for Internet Monitor to another location, such as an Amazon S3 bucket. The measurements are also published to Amazon CloudWatch Logs.

" } }, + "Ipv4PrefixList": { + "base": null, + "refs": { + "ImpactedLocation$Ipv4Prefixes": "

The IPv4 prefixes at the client location that was impacted by the health event.

" + } + }, "LimitExceededException": { "base": "

The request exceeded a service quota.

", "refs": { @@ -379,7 +385,7 @@ "HealthEventsConfig$AvailabilityScoreThreshold": "

The health event threshold percentage set for availability scores.

", "HealthEventsConfig$PerformanceScoreThreshold": "

The health event threshold percentage set for performance scores.

", "LocalHealthEventsConfig$HealthScoreThreshold": "

The health event threshold percentage set for a local health score.

", - "LocalHealthEventsConfig$MinTrafficImpact": "

The minimum percentage of overall traffic for an application that must be impacted by an issue before Internet Monitor creates an event when a threshold is crossed for a local health score.

If you don't set a minimum traffic impact threshold, the default value is 0.01%.

" + "LocalHealthEventsConfig$MinTrafficImpact": "

The minimum percentage of overall traffic for an application that must be impacted by an issue before Internet Monitor creates an event when a threshold is crossed for a local health score.

If you don't set a minimum traffic impact threshold, the default value is 0.1%.

" } }, "PerformanceMeasurement": { @@ -427,7 +433,7 @@ "QueryType": { "base": null, "refs": { - "StartQueryInput$QueryType": "

The type of query to run. The following are the three types of queries that you can run using the Internet Monitor query interface:

  • MEASUREMENTS: TBD definition

  • TOP_LOCATIONS: TBD definition

  • TOP_LOCATION_DETAILS: TBD definition

For lists of the fields returned with each query type and more information about how each type of query is performed, see Using the Amazon CloudWatch Internet Monitor query interface in the Amazon CloudWatch Internet Monitor User Guide.

" + "StartQueryInput$QueryType": "

The type of query to run. The following are the three types of queries that you can run using the Internet Monitor query interface:

  • MEASUREMENTS: Provides availability score, performance score, total traffic, and round-trip times, at 5 minute intervals.

  • TOP_LOCATIONS: Provides availability score, performance score, total traffic, and time to first byte (TTFB) information, for the top location and ASN combinations that you're monitoring, by traffic volume.

  • TOP_LOCATION_DETAILS: Provides TTFB for Amazon CloudFront, your current configuration, and the best performing EC2 configuration, at 1 hour intervals.

For lists of the fields returned with each query type and more information about how each type of query is performed, see Using the Amazon CloudWatch Internet Monitor query interface in the Amazon CloudWatch Internet Monitor User Guide.

" } }, "ResourceName": { @@ -523,6 +529,7 @@ "ImpactedLocation$ServiceLocation": "

The service location where the health event is located.

", "InternalServerErrorException$message": null, "InternalServerException$message": null, + "Ipv4PrefixList$member": null, "LimitExceededException$message": null, "ListHealthEventsInput$NextToken": "

The token for the next set of results. You receive this token from a previous call.

", "ListHealthEventsOutput$NextToken": "

The token for the next set of results. You receive this token from a previous call.

", diff --git a/apis/kinesisvideo/2017-09-30/api-2.json b/apis/kinesisvideo/2017-09-30/api-2.json index e0ef9d86b1d..9fc6bfe41cf 100644 --- a/apis/kinesisvideo/2017-09-30/api-2.json +++ b/apis/kinesisvideo/2017-09-30/api-2.json @@ -1188,7 +1188,7 @@ }, "NextToken":{ "type":"string", - "max":512, + "max":1024, "min":0, "pattern":"[a-zA-Z0-9+/=]*" }, diff --git a/gems/aws-sdk-internetmonitor/CHANGELOG.md b/gems/aws-sdk-internetmonitor/CHANGELOG.md index a4b3d4463e5..fe6cb4e9242 100644 --- a/gems/aws-sdk-internetmonitor/CHANGELOG.md +++ b/gems/aws-sdk-internetmonitor/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.15.0 (2024-02-22) +------------------ + +* Feature - This release adds IPv4 prefixes to health events + 1.14.0 (2024-01-26) ------------------ diff --git a/gems/aws-sdk-internetmonitor/VERSION b/gems/aws-sdk-internetmonitor/VERSION index 850e742404b..141f2e805be 100644 --- a/gems/aws-sdk-internetmonitor/VERSION +++ b/gems/aws-sdk-internetmonitor/VERSION @@ -1 +1 @@ -1.14.0 +1.15.0 diff --git a/gems/aws-sdk-internetmonitor/lib/aws-sdk-internetmonitor.rb b/gems/aws-sdk-internetmonitor/lib/aws-sdk-internetmonitor.rb index 82df56c4d1d..de6e4b9ab34 100644 --- a/gems/aws-sdk-internetmonitor/lib/aws-sdk-internetmonitor.rb +++ b/gems/aws-sdk-internetmonitor/lib/aws-sdk-internetmonitor.rb @@ -53,6 +53,6 @@ # @!group service module Aws::InternetMonitor - GEM_VERSION = '1.14.0' + GEM_VERSION = '1.15.0' end diff --git a/gems/aws-sdk-internetmonitor/lib/aws-sdk-internetmonitor/client.rb b/gems/aws-sdk-internetmonitor/lib/aws-sdk-internetmonitor/client.rb index 3441d3f9dda..1d5d3e9aee1 100644 --- a/gems/aws-sdk-internetmonitor/lib/aws-sdk-internetmonitor/client.rb +++ b/gems/aws-sdk-internetmonitor/lib/aws-sdk-internetmonitor/client.rb @@ -645,6 +645,8 @@ def delete_monitor(params = {}, options = {}) # resp.impacted_locations[0].internet_health.performance.round_trip_time.p50 #=> Float # 
resp.impacted_locations[0].internet_health.performance.round_trip_time.p90 #=> Float # resp.impacted_locations[0].internet_health.performance.round_trip_time.p95 #=> Float + # resp.impacted_locations[0].ipv_4_prefixes #=> Array + # resp.impacted_locations[0].ipv_4_prefixes[0] #=> String # resp.status #=> String, one of "ACTIVE", "RESOLVED" # resp.percent_of_total_traffic_impacted #=> Float # resp.impact_type #=> String, one of "AVAILABILITY", "PERFORMANCE", "LOCAL_AVAILABILITY", "LOCAL_PERFORMANCE" @@ -919,6 +921,8 @@ def get_query_status(params = {}, options = {}) # resp.health_events[0].impacted_locations[0].internet_health.performance.round_trip_time.p50 #=> Float # resp.health_events[0].impacted_locations[0].internet_health.performance.round_trip_time.p90 #=> Float # resp.health_events[0].impacted_locations[0].internet_health.performance.round_trip_time.p95 #=> Float + # resp.health_events[0].impacted_locations[0].ipv_4_prefixes #=> Array + # resp.health_events[0].impacted_locations[0].ipv_4_prefixes[0] #=> String # resp.health_events[0].status #=> String, one of "ACTIVE", "RESOLVED" # resp.health_events[0].percent_of_total_traffic_impacted #=> Float # resp.health_events[0].impact_type #=> String, one of "AVAILABILITY", "PERFORMANCE", "LOCAL_AVAILABILITY", "LOCAL_PERFORMANCE" @@ -1047,11 +1051,17 @@ def list_tags_for_resource(params = {}, options = {}) # The type of query to run. The following are the three types of queries # that you can run using the Internet Monitor query interface: # - # * `MEASUREMENTS`: TBD definition + # * `MEASUREMENTS`: Provides availability score, performance score, + # total traffic, and round-trip times, at 5 minute intervals. # - # * `TOP_LOCATIONS`: TBD definition + # * `TOP_LOCATIONS`: Provides availability score, performance score, + # total traffic, and time to first byte (TTFB) information, for the + # top location and ASN combinations that you're monitoring, by + # traffic volume. 
# - # * `TOP_LOCATION_DETAILS`: TBD definition + # * `TOP_LOCATION_DETAILS`: Provides TTFB for Amazon CloudFront, your + # current configuration, and the best performing EC2 configuration, at + # 1 hour intervals. # # For lists of the fields returned with each query type and more # information about how each type of query is performed, see [ Using the @@ -1352,7 +1362,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-internetmonitor' - context[:gem_version] = '1.14.0' + context[:gem_version] = '1.15.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-internetmonitor/lib/aws-sdk-internetmonitor/client_api.rb b/gems/aws-sdk-internetmonitor/lib/aws-sdk-internetmonitor/client_api.rb index 6a914da886a..9c7c469e7b2 100644 --- a/gems/aws-sdk-internetmonitor/lib/aws-sdk-internetmonitor/client_api.rb +++ b/gems/aws-sdk-internetmonitor/lib/aws-sdk-internetmonitor/client_api.rb @@ -46,6 +46,7 @@ module ClientApi InternalServerException = Shapes::StructureShape.new(name: 'InternalServerException') InternetHealth = Shapes::StructureShape.new(name: 'InternetHealth') InternetMeasurementsLogDelivery = Shapes::StructureShape.new(name: 'InternetMeasurementsLogDelivery') + Ipv4PrefixList = Shapes::ListShape.new(name: 'Ipv4PrefixList') LimitExceededException = Shapes::StructureShape.new(name: 'LimitExceededException') ListHealthEventsInput = Shapes::StructureShape.new(name: 'ListHealthEventsInput') ListHealthEventsOutput = Shapes::StructureShape.new(name: 'ListHealthEventsOutput') @@ -236,6 +237,7 @@ module ClientApi ImpactedLocation.add_member(:status, Shapes::ShapeRef.new(shape: HealthEventStatus, required: true, location_name: "Status")) ImpactedLocation.add_member(:caused_by, Shapes::ShapeRef.new(shape: NetworkImpairment, location_name: "CausedBy")) ImpactedLocation.add_member(:internet_health, Shapes::ShapeRef.new(shape: InternetHealth, location_name: "InternetHealth")) + 
ImpactedLocation.add_member(:ipv_4_prefixes, Shapes::ShapeRef.new(shape: Ipv4PrefixList, location_name: "Ipv4Prefixes")) ImpactedLocation.struct_class = Types::ImpactedLocation ImpactedLocationsList.member = Shapes::ShapeRef.new(shape: ImpactedLocation) @@ -253,6 +255,8 @@ module ClientApi InternetMeasurementsLogDelivery.add_member(:s3_config, Shapes::ShapeRef.new(shape: S3Config, location_name: "S3Config")) InternetMeasurementsLogDelivery.struct_class = Types::InternetMeasurementsLogDelivery + Ipv4PrefixList.member = Shapes::ShapeRef.new(shape: String) + LimitExceededException.add_member(:message, Shapes::ShapeRef.new(shape: String, location_name: "message")) LimitExceededException.struct_class = Types::LimitExceededException diff --git a/gems/aws-sdk-internetmonitor/lib/aws-sdk-internetmonitor/types.rb b/gems/aws-sdk-internetmonitor/lib/aws-sdk-internetmonitor/types.rb index 46a4b53c1e9..f97ea44a0c2 100644 --- a/gems/aws-sdk-internetmonitor/lib/aws-sdk-internetmonitor/types.rb +++ b/gems/aws-sdk-internetmonitor/lib/aws-sdk-internetmonitor/types.rb @@ -840,6 +840,11 @@ class HealthEventsConfig < Struct.new( # The calculated health at a specific location. # @return [Types::InternetHealth] # + # @!attribute [rw] ipv_4_prefixes + # The IPv4 prefixes at the client location that was impacted by the + # health event. + # @return [Array] + # # @see http://docs.aws.amazon.com/goto/WebAPI/internetmonitor-2021-06-03/ImpactedLocation AWS API Documentation # class ImpactedLocation < Struct.new( @@ -856,7 +861,8 @@ class ImpactedLocation < Struct.new( :service_location, :status, :caused_by, - :internet_health) + :internet_health, + :ipv_4_prefixes) SENSITIVE = [] include Aws::Structure end @@ -1143,7 +1149,7 @@ class ListTagsForResourceOutput < Struct.new( # event when a threshold is crossed for a local health score. # # If you don't set a minimum traffic impact threshold, the default - # value is 0.01%. + # value is 0.1%. 
# @return [Float] # # @see http://docs.aws.amazon.com/goto/WebAPI/internetmonitor-2021-06-03/LocalHealthEventsConfig AWS API Documentation @@ -1463,11 +1469,17 @@ class S3Config < Struct.new( # The type of query to run. The following are the three types of # queries that you can run using the Internet Monitor query interface: # - # * `MEASUREMENTS`: TBD definition + # * `MEASUREMENTS`: Provides availability score, performance score, + # total traffic, and round-trip times, at 5 minute intervals. # - # * `TOP_LOCATIONS`: TBD definition + # * `TOP_LOCATIONS`: Provides availability score, performance score, + # total traffic, and time to first byte (TTFB) information, for the + # top location and ASN combinations that you're monitoring, by + # traffic volume. # - # * `TOP_LOCATION_DETAILS`: TBD definition + # * `TOP_LOCATION_DETAILS`: Provides TTFB for Amazon CloudFront, your + # current configuration, and the best performing EC2 configuration, + # at 1 hour intervals. # # For lists of the fields returned with each query type and more # information about how each type of query is performed, see [ Using diff --git a/gems/aws-sdk-internetmonitor/sig/types.rbs b/gems/aws-sdk-internetmonitor/sig/types.rbs index 23342987fa2..1da2b8c35d7 100644 --- a/gems/aws-sdk-internetmonitor/sig/types.rbs +++ b/gems/aws-sdk-internetmonitor/sig/types.rbs @@ -170,6 +170,7 @@ module Aws::InternetMonitor attr_accessor status: ("ACTIVE" | "RESOLVED") attr_accessor caused_by: Types::NetworkImpairment attr_accessor internet_health: Types::InternetHealth + attr_accessor ipv_4_prefixes: ::Array[::String] SENSITIVE: [] end diff --git a/gems/aws-sdk-kinesisvideo/CHANGELOG.md b/gems/aws-sdk-kinesisvideo/CHANGELOG.md index dd34e100bbc..09d9104b326 100644 --- a/gems/aws-sdk-kinesisvideo/CHANGELOG.md +++ b/gems/aws-sdk-kinesisvideo/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.60.0 (2024-02-22) +------------------ + +* Feature - Increasing NextToken parameter length 
restriction for List APIs from 512 to 1024. + 1.59.0 (2024-01-26) ------------------ diff --git a/gems/aws-sdk-kinesisvideo/VERSION b/gems/aws-sdk-kinesisvideo/VERSION index bb120e876c6..4d5fde5bd16 100644 --- a/gems/aws-sdk-kinesisvideo/VERSION +++ b/gems/aws-sdk-kinesisvideo/VERSION @@ -1 +1 @@ -1.59.0 +1.60.0 diff --git a/gems/aws-sdk-kinesisvideo/lib/aws-sdk-kinesisvideo.rb b/gems/aws-sdk-kinesisvideo/lib/aws-sdk-kinesisvideo.rb index 2a941c05063..9eaff48a73d 100644 --- a/gems/aws-sdk-kinesisvideo/lib/aws-sdk-kinesisvideo.rb +++ b/gems/aws-sdk-kinesisvideo/lib/aws-sdk-kinesisvideo.rb @@ -52,6 +52,6 @@ # @!group service module Aws::KinesisVideo - GEM_VERSION = '1.59.0' + GEM_VERSION = '1.60.0' end diff --git a/gems/aws-sdk-kinesisvideo/lib/aws-sdk-kinesisvideo/client.rb b/gems/aws-sdk-kinesisvideo/lib/aws-sdk-kinesisvideo/client.rb index 85f169be58a..dc9c19bacb6 100644 --- a/gems/aws-sdk-kinesisvideo/lib/aws-sdk-kinesisvideo/client.rb +++ b/gems/aws-sdk-kinesisvideo/lib/aws-sdk-kinesisvideo/client.rb @@ -1938,7 +1938,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-kinesisvideo' - context[:gem_version] = '1.59.0' + context[:gem_version] = '1.60.0' Seahorse::Client::Request.new(handlers, context) end From 3169321cc1d66a154af63caaab04793f37e24b0f Mon Sep 17 00:00:00 2001 From: AWS SDK For Ruby Date: Fri, 23 Feb 2024 19:15:41 +0000 Subject: [PATCH 5/8] Updated API models and rebuilt service gems. 
--- apis/appsync/2017-07-25/docs-2.json | 10 +-- apis/qldb/2019-01-02/docs-2.json | 4 +- apis/qldb/2019-01-02/endpoint-rule-set-1.json | 40 +++++----- apis/rds/2014-10-31/api-2.json | 16 ++-- apis/rds/2014-10-31/docs-2.json | 15 ++-- apis/rum/2018-05-10/docs-2.json | 18 ++--- apis/rum/2018-05-10/endpoint-rule-set-1.json | 40 +++++----- gems/aws-sdk-appsync/CHANGELOG.md | 5 ++ gems/aws-sdk-appsync/VERSION | 2 +- gems/aws-sdk-appsync/lib/aws-sdk-appsync.rb | 2 +- .../lib/aws-sdk-appsync/client.rb | 22 +++--- .../lib/aws-sdk-appsync/types.rb | 30 +++++--- gems/aws-sdk-cloudwatchrum/CHANGELOG.md | 5 ++ gems/aws-sdk-cloudwatchrum/VERSION | 2 +- .../lib/aws-sdk-cloudwatchrum.rb | 2 +- .../lib/aws-sdk-cloudwatchrum/client.rb | 40 ++++++---- .../endpoint_provider.rb | 2 +- .../lib/aws-sdk-cloudwatchrum/types.rb | 76 +++++++++++++++---- gems/aws-sdk-qldb/CHANGELOG.md | 5 ++ gems/aws-sdk-qldb/VERSION | 2 +- gems/aws-sdk-qldb/lib/aws-sdk-qldb.rb | 2 +- gems/aws-sdk-qldb/lib/aws-sdk-qldb/client.rb | 2 +- .../lib/aws-sdk-qldb/endpoint_provider.rb | 2 +- gems/aws-sdk-qldb/lib/aws-sdk-qldb/types.rb | 8 +- gems/aws-sdk-rds/CHANGELOG.md | 5 ++ gems/aws-sdk-rds/VERSION | 2 +- gems/aws-sdk-rds/lib/aws-sdk-rds.rb | 2 +- gems/aws-sdk-rds/lib/aws-sdk-rds/client.rb | 10 +-- .../aws-sdk-rds/lib/aws-sdk-rds/client_api.rb | 11 +-- 29 files changed, 242 insertions(+), 140 deletions(-) diff --git a/apis/appsync/2017-07-25/docs-2.json b/apis/appsync/2017-07-25/docs-2.json index 05dfc913773..05c77e90fbd 100644 --- a/apis/appsync/2017-07-25/docs-2.json +++ b/apis/appsync/2017-07-25/docs-2.json @@ -278,8 +278,8 @@ "base": null, "refs": { "ApiCache$healthMetricsConfig": "

Controls how cache health metrics will be emitted to CloudWatch. Cache health metrics include:

  • NetworkBandwidthOutAllowanceExceeded: The network packets dropped because the throughput exceeded the aggregated bandwidth limit. This is useful for diagnosing bottlenecks in a cache configuration.

  • EngineCPUUtilization: The CPU utilization (percentage) allocated to the Redis process. This is useful for diagnosing bottlenecks in a cache configuration.

Metrics will be recorded by API ID. You can set the value to ENABLED or DISABLED.

", - "CreateApiCacheRequest$healthMetricsConfig": "

Controls how cache health metrics will be emitted to CloudWatch. Cache health metrics include:

  • NetworkBandwidthOutAllowanceExceeded: The number of times a specified GraphQL operation was called.

  • EngineCPUUtilization: The number of GraphQL errors that occurred during a specified GraphQL operation.

Metrics will be recorded by API ID. You can set the value to ENABLED or DISABLED.

", - "UpdateApiCacheRequest$healthMetricsConfig": "

Controls how cache health metrics will be emitted to CloudWatch. Cache health metrics include:

  • NetworkBandwidthOutAllowanceExceeded: The number of times a specified GraphQL operation was called.

  • EngineCPUUtilization: The number of GraphQL errors that occurred during a specified GraphQL operation.

Metrics will be recorded by API ID. You can set the value to ENABLED or DISABLED.

" + "CreateApiCacheRequest$healthMetricsConfig": "

Controls how cache health metrics will be emitted to CloudWatch. Cache health metrics include:

  • NetworkBandwidthOutAllowanceExceeded: The network packets dropped because the throughput exceeded the aggregated bandwidth limit. This is useful for diagnosing bottlenecks in a cache configuration.

  • EngineCPUUtilization: The CPU utilization (percentage) allocated to the Redis process. This is useful for diagnosing bottlenecks in a cache configuration.

Metrics will be recorded by API ID. You can set the value to ENABLED or DISABLED.

", + "UpdateApiCacheRequest$healthMetricsConfig": "

Controls how cache health metrics will be emitted to CloudWatch. Cache health metrics include:

  • NetworkBandwidthOutAllowanceExceeded: The network packets dropped because the throughput exceeded the aggregated bandwidth limit. This is useful for diagnosing bottlenecks in a cache configuration.

  • EngineCPUUtilization: The CPU utilization (percentage) allocated to the Redis process. This is useful for diagnosing bottlenecks in a cache configuration.

Metrics will be recorded by API ID. You can set the value to ENABLED or DISABLED.

" } }, "CachingConfig": { @@ -543,7 +543,7 @@ "DataSourceLevelMetricsBehavior": { "base": null, "refs": { - "EnhancedMetricsConfig$dataSourceLevelMetricsBehavior": "

Controls how data source metrics will be emitted to CloudWatch. Data source metrics include:

  • Requests: The number of invocations that occured during a request.

  • Latency: The time to complete a data source invocation.

  • Errors: The number of errors that occurred during a data source invocation.

These metrics can be emitted to CloudWatch per data source or for all data sources in the request. Metrics will be recorded by API ID and data source name. dataSourceLevelMetricsBehavior accepts one of these values at a time:

  • FULL_REQUEST_DATA_SOURCE_METRICS: Records and emits metric data for all data sources in the request.

  • PER_DATA_SOURCE_METRICS: Records and emits metric data for data sources that have the metricConfig value set to ENABLED.

" + "EnhancedMetricsConfig$dataSourceLevelMetricsBehavior": "

Controls how data source metrics will be emitted to CloudWatch. Data source metrics include:

  • Requests: The number of invocations that occurred during a request.

  • Latency: The time to complete a data source invocation.

  • Errors: The number of errors that occurred during a data source invocation.

These metrics can be emitted to CloudWatch per data source or for all data sources in the request. Metrics will be recorded by API ID and data source name. dataSourceLevelMetricsBehavior accepts one of these values at a time:

  • FULL_REQUEST_DATA_SOURCE_METRICS: Records and emits metric data for all data sources in the request.

  • PER_DATA_SOURCE_METRICS: Records and emits metric data for data sources that have the metricsConfig value set to ENABLED.

" } }, "DataSourceLevelMetricsConfig": { @@ -750,7 +750,7 @@ } }, "EnhancedMetricsConfig": { - "base": "

Enables and controls the enhanced metrics feature. Enhanced metrics emit granular data on API usage and performance such as AppSync request and error counts, latency, and cache hits/misses. All enhanced metric data is sent to your CloudWatch account, and you can configure the types of data that will be sent.

Enhanced metrics can be configured at the resolver, data source, and operation levels. EnhancedMetricsConfig contains three required parameters, each controlling one of these categories:

  1. resolverLevelMetricsBehavior: Controls how resolver metrics will be emitted to CloudWatch. Resolver metrics include:

    • GraphQL errors: The number of GraphQL errors that occurred.

    • Requests: The number of invocations that occurred during a request.

    • Latency: The time to complete a resolver invocation.

    • Cache hits: The number of cache hits during a request.

    • Cache misses: The number of cache misses during a request.

    These metrics can be emitted to CloudWatch per resolver or for all resolvers in the request. Metrics will be recorded by API ID and resolver name. resolverLevelMetricsBehavior accepts one of these values at a time:

    • FULL_REQUEST_RESOLVER_METRICS: Records and emits metric data for all resolvers in the request.

    • PER_RESOLVER_METRICS: Records and emits metric data for resolvers that have the metricConfig value set to ENABLED.

  2. dataSourceLevelMetricsBehavior: Controls how data source metrics will be emitted to CloudWatch. Data source metrics include:

    • Requests: The number of invocations that occured during a request.

    • Latency: The time to complete a data source invocation.

    • Errors: The number of errors that occurred during a data source invocation.

    These metrics can be emitted to CloudWatch per data source or for all data sources in the request. Metrics will be recorded by API ID and data source name. dataSourceLevelMetricsBehavior accepts one of these values at a time:

    • FULL_REQUEST_DATA_SOURCE_METRICS: Records and emits metric data for all data sources in the request.

    • PER_DATA_SOURCE_METRICS: Records and emits metric data for data sources that have the metricConfig value set to ENABLED.

  3. operationLevelMetricsConfig: Controls how operation metrics will be emitted to CloudWatch. Operation metrics include:

    • Requests: The number of times a specified GraphQL operation was called.

    • GraphQL errors: The number of GraphQL errors that occurred during a specified GraphQL operation.

    Metrics will be recorded by API ID and operation name. You can set the value to ENABLED or DISABLED.

", + "base": "

Enables and controls the enhanced metrics feature. Enhanced metrics emit granular data on API usage and performance such as AppSync request and error counts, latency, and cache hits/misses. All enhanced metric data is sent to your CloudWatch account, and you can configure the types of data that will be sent.

Enhanced metrics can be configured at the resolver, data source, and operation levels. EnhancedMetricsConfig contains three required parameters, each controlling one of these categories:

  1. resolverLevelMetricsBehavior: Controls how resolver metrics will be emitted to CloudWatch. Resolver metrics include:

    • GraphQL errors: The number of GraphQL errors that occurred.

    • Requests: The number of invocations that occurred during a request.

    • Latency: The time to complete a resolver invocation.

    • Cache hits: The number of cache hits during a request.

    • Cache misses: The number of cache misses during a request.

    These metrics can be emitted to CloudWatch per resolver or for all resolvers in the request. Metrics will be recorded by API ID and resolver name. resolverLevelMetricsBehavior accepts one of these values at a time:

    • FULL_REQUEST_RESOLVER_METRICS: Records and emits metric data for all resolvers in the request.

    • PER_RESOLVER_METRICS: Records and emits metric data for resolvers that have the metricsConfig value set to ENABLED.

  2. dataSourceLevelMetricsBehavior: Controls how data source metrics will be emitted to CloudWatch. Data source metrics include:

    • Requests: The number of invocations that occurred during a request.

    • Latency: The time to complete a data source invocation.

    • Errors: The number of errors that occurred during a data source invocation.

    These metrics can be emitted to CloudWatch per data source or for all data sources in the request. Metrics will be recorded by API ID and data source name. dataSourceLevelMetricsBehavior accepts one of these values at a time:

    • FULL_REQUEST_DATA_SOURCE_METRICS: Records and emits metric data for all data sources in the request.

    • PER_DATA_SOURCE_METRICS: Records and emits metric data for data sources that have the metricsConfig value set to ENABLED.

  3. operationLevelMetricsConfig: Controls how operation metrics will be emitted to CloudWatch. Operation metrics include:

    • Requests: The number of times a specified GraphQL operation was called.

    • GraphQL errors: The number of GraphQL errors that occurred during a specified GraphQL operation.

    Metrics will be recorded by API ID and operation name. You can set the value to ENABLED or DISABLED.

", "refs": { "CreateGraphqlApiRequest$enhancedMetricsConfig": "

The enhancedMetricsConfig object.

", "GraphqlApi$enhancedMetricsConfig": "

The enhancedMetricsConfig object.

", @@ -1450,7 +1450,7 @@ "ResolverLevelMetricsBehavior": { "base": null, "refs": { - "EnhancedMetricsConfig$resolverLevelMetricsBehavior": "

Controls how resolver metrics will be emitted to CloudWatch. Resolver metrics include:

  • GraphQL errors: The number of GraphQL errors that occurred.

  • Requests: The number of invocations that occurred during a request.

  • Latency: The time to complete a resolver invocation.

  • Cache hits: The number of cache hits during a request.

  • Cache misses: The number of cache misses during a request.

These metrics can be emitted to CloudWatch per resolver or for all resolvers in the request. Metrics will be recorded by API ID and resolver name. resolverLevelMetricsBehavior accepts one of these values at a time:

  • FULL_REQUEST_RESOLVER_METRICS: Records and emits metric data for all resolvers in the request.

  • PER_RESOLVER_METRICS: Records and emits metric data for resolvers that have the metricConfig value set to ENABLED.

" + "EnhancedMetricsConfig$resolverLevelMetricsBehavior": "

Controls how resolver metrics will be emitted to CloudWatch. Resolver metrics include:

  • GraphQL errors: The number of GraphQL errors that occurred.

  • Requests: The number of invocations that occurred during a request.

  • Latency: The time to complete a resolver invocation.

  • Cache hits: The number of cache hits during a request.

  • Cache misses: The number of cache misses during a request.

These metrics can be emitted to CloudWatch per resolver or for all resolvers in the request. Metrics will be recorded by API ID and resolver name. resolverLevelMetricsBehavior accepts one of these values at a time:

  • FULL_REQUEST_RESOLVER_METRICS: Records and emits metric data for all resolvers in the request.

  • PER_RESOLVER_METRICS: Records and emits metric data for resolvers that have the metricsConfig value set to ENABLED.

" } }, "ResolverLevelMetricsConfig": { diff --git a/apis/qldb/2019-01-02/docs-2.json b/apis/qldb/2019-01-02/docs-2.json index 61f6588ad2a..afde17e72bf 100644 --- a/apis/qldb/2019-01-02/docs-2.json +++ b/apis/qldb/2019-01-02/docs-2.json @@ -35,7 +35,7 @@ "JournalKinesisStreamDescription$Arn": "

The Amazon Resource Name (ARN) of the QLDB journal stream.

", "JournalS3ExportDescription$RoleArn": "

The Amazon Resource Name (ARN) of the IAM role that grants QLDB permissions for a journal export job to do the following:

  • Write objects into your Amazon Simple Storage Service (Amazon S3) bucket.

  • (Optional) Use your customer managed key in Key Management Service (KMS) for server-side encryption of your exported data.

", "KinesisConfiguration$StreamArn": "

The Amazon Resource Name (ARN) of the Kinesis Data Streams resource.

", - "LedgerEncryptionDescription$KmsKeyArn": "

The Amazon Resource Name (ARN) of the customer managed KMS key that the ledger uses for encryption at rest. If this parameter is undefined, the ledger uses an Amazon Web Services owned KMS key for encryption.

", + "LedgerEncryptionDescription$KmsKeyArn": "

The Amazon Resource Name (ARN) of the customer managed KMS key that the ledger uses for encryption at rest. If this parameter is undefined, the ledger uses an Amazon Web Services owned KMS key for encryption. It will display AWS_OWNED_KMS_KEY when updating the ledger's encryption configuration to the Amazon Web Services owned KMS key.

", "ListTagsForResourceRequest$ResourceArn": "

The Amazon Resource Name (ARN) for which to list the tags. For example:

arn:aws:qldb:us-east-1:123456789012:ledger/exampleLedger

", "S3EncryptionConfiguration$KmsKeyArn": "

The Amazon Resource Name (ARN) of a symmetric encryption key in Key Management Service (KMS). Amazon S3 does not support asymmetric KMS keys.

You must provide a KmsKeyArn if you specify SSE_KMS as the ObjectEncryptionType.

KmsKeyArn is not required if you specify SSE_S3 as the ObjectEncryptionType.

", "StreamJournalToKinesisRequest$RoleArn": "

The Amazon Resource Name (ARN) of the IAM role that grants QLDB permissions for a journal stream to write data records to a Kinesis Data Streams resource.

To pass a role to QLDB when requesting a journal stream, you must have permissions to perform the iam:PassRole action on the IAM role resource. This is required for all journal stream requests.

", @@ -246,7 +246,7 @@ "LedgerEncryptionDescription": { "base": "

Information about the encryption of data at rest in an Amazon QLDB ledger. This includes the current status, the key in Key Management Service (KMS), and when the key became inaccessible (in the case of an error).

For more information, see Encryption at rest in the Amazon QLDB Developer Guide.

", "refs": { - "DescribeLedgerResponse$EncryptionDescription": "

Information about the encryption of data at rest in the ledger. This includes the current status, the KMS key, and when the key became inaccessible (in the case of an error).

", + "DescribeLedgerResponse$EncryptionDescription": "

Information about the encryption of data at rest in the ledger. This includes the current status, the KMS key, and when the key became inaccessible (in the case of an error). If this parameter is undefined, the ledger uses an Amazon Web Services owned KMS key for encryption.

", "UpdateLedgerResponse$EncryptionDescription": "

Information about the encryption of data at rest in the ledger. This includes the current status, the KMS key, and when the key became inaccessible (in the case of an error).

" } }, diff --git a/apis/qldb/2019-01-02/endpoint-rule-set-1.json b/apis/qldb/2019-01-02/endpoint-rule-set-1.json index e340028309d..b0cd79996db 100644 --- a/apis/qldb/2019-01-02/endpoint-rule-set-1.json +++ b/apis/qldb/2019-01-02/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff 
--git a/apis/rds/2014-10-31/api-2.json b/apis/rds/2014-10-31/api-2.json index 40c47f91a09..3755b4ad451 100644 --- a/apis/rds/2014-10-31/api-2.json +++ b/apis/rds/2014-10-31/api-2.json @@ -5385,7 +5385,7 @@ "type":"structure", "members":{ "DBShardGroupResourceId":{"shape":"String"}, - "DBShardGroupIdentifier":{"shape":"String"}, + "DBShardGroupIdentifier":{"shape":"DBShardGroupIdentifier"}, "DBClusterIdentifier":{"shape":"String"}, "MaxACU":{"shape":"DoubleOptional"}, "ComputeRedundancy":{"shape":"IntegerOptional"}, @@ -5405,6 +5405,12 @@ }, "exception":true }, + "DBShardGroupIdentifier":{ + "type":"string", + "max":63, + "min":1, + "pattern":"[a-zA-Z][a-zA-Z0-9]*(-[a-zA-Z0-9]+)*" + }, "DBShardGroupNotFoundFault":{ "type":"structure", "members":{ @@ -5832,7 +5838,7 @@ "type":"structure", "required":["DBShardGroupIdentifier"], "members":{ - "DBShardGroupIdentifier":{"shape":"String"} + "DBShardGroupIdentifier":{"shape":"DBShardGroupIdentifier"} } }, "DeleteDBSnapshotMessage":{ @@ -6229,7 +6235,7 @@ "DescribeDBShardGroupsMessage":{ "type":"structure", "members":{ - "DBShardGroupIdentifier":{"shape":"String"}, + "DBShardGroupIdentifier":{"shape":"DBShardGroupIdentifier"}, "Filters":{"shape":"FilterList"}, "Marker":{"shape":"String"}, "MaxRecords":{"shape":"MaxRecords"} @@ -8000,7 +8006,7 @@ "type":"structure", "required":["DBShardGroupIdentifier"], "members":{ - "DBShardGroupIdentifier":{"shape":"String"}, + "DBShardGroupIdentifier":{"shape":"DBShardGroupIdentifier"}, "MaxACU":{"shape":"DoubleOptional"} } }, @@ -8703,7 +8709,7 @@ "type":"structure", "required":["DBShardGroupIdentifier"], "members":{ - "DBShardGroupIdentifier":{"shape":"String"} + "DBShardGroupIdentifier":{"shape":"DBShardGroupIdentifier"} } }, "RecommendedAction":{ diff --git a/apis/rds/2014-10-31/docs-2.json b/apis/rds/2014-10-31/docs-2.json index 0b08f91e522..e0570d41b75 100644 --- a/apis/rds/2014-10-31/docs-2.json +++ b/apis/rds/2014-10-31/docs-2.json @@ -1821,6 +1821,16 @@ "refs": { } }, + 
"DBShardGroupIdentifier": { + "base": null, + "refs": { + "DBShardGroup$DBShardGroupIdentifier": "

The name of the DB shard group.

", + "DeleteDBShardGroupMessage$DBShardGroupIdentifier": "

The name of the DB shard group to delete.

", + "DescribeDBShardGroupsMessage$DBShardGroupIdentifier": "

The user-supplied DB shard group identifier or the Amazon Resource Name (ARN) of the DB shard group. If this parameter is specified, information for only the specific DB shard group is returned. This parameter isn't case-sensitive.

Constraints:

  • If supplied, must match an existing DB shard group identifier.

", + "ModifyDBShardGroupMessage$DBShardGroupIdentifier": "

The name of the DB shard group to modify.

", + "RebootDBShardGroupMessage$DBShardGroupIdentifier": "

The name of the DB shard group to reboot.

" + } + }, "DBShardGroupNotFoundFault": { "base": "

The specified DB shard group name wasn't found.

", "refs": { @@ -4983,7 +4993,6 @@ "DBSecurityGroupMessage$Marker": "

An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", "DBSecurityGroupNameList$member": null, "DBShardGroup$DBShardGroupResourceId": "

The Amazon Web Services Region-unique, immutable identifier for the DB shard group.

", - "DBShardGroup$DBShardGroupIdentifier": "

The name of the DB shard group.

", "DBShardGroup$DBClusterIdentifier": "

The name of the primary DB cluster for the DB shard group.

", "DBShardGroup$Status": "

The status of the DB shard group.

", "DBShardGroup$Endpoint": "

The connection endpoint for the DB shard group.

", @@ -5042,7 +5051,6 @@ "DeleteDBParameterGroupMessage$DBParameterGroupName": "

The name of the DB parameter group.

Constraints:

  • Must be the name of an existing DB parameter group

  • You can't delete a default DB parameter group

  • Can't be associated with any DB instances

", "DeleteDBProxyRequest$DBProxyName": "

The name of the DB proxy to delete.

", "DeleteDBSecurityGroupMessage$DBSecurityGroupName": "

The name of the DB security group to delete.

You can't delete the default DB security group.

Constraints:

  • Must be 1 to 255 letters, numbers, or hyphens.

  • First character must be a letter

  • Can't end with a hyphen or contain two consecutive hyphens

  • Must not be \"Default\"

", - "DeleteDBShardGroupMessage$DBShardGroupIdentifier": "

Teh name of the DB shard group to delete.

", "DeleteDBSnapshotMessage$DBSnapshotIdentifier": "

The DB snapshot identifier.

Constraints: Must be the name of an existing DB snapshot in the available state.

", "DeleteDBSubnetGroupMessage$DBSubnetGroupName": "

The name of the database subnet group to delete.

You can't delete the default subnet group.

Constraints: Must match the name of an existing DBSubnetGroup. Must not be default.

Example: mydbsubnetgroup

", "DeleteEventSubscriptionMessage$SubscriptionName": "

The name of the RDS event notification subscription you want to delete.

", @@ -5116,7 +5124,6 @@ "DescribeDBRecommendationsMessage$Marker": "

An optional pagination token provided by a previous DescribeDBRecommendations request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", "DescribeDBSecurityGroupsMessage$DBSecurityGroupName": "

The name of the DB security group to return details for.

", "DescribeDBSecurityGroupsMessage$Marker": "

An optional pagination token provided by a previous DescribeDBSecurityGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", - "DescribeDBShardGroupsMessage$DBShardGroupIdentifier": "

The user-supplied DB shard group identifier or the Amazon Resource Name (ARN) of the DB shard group. If this parameter is specified, information for only the specific DB shard group is returned. This parameter isn't case-sensitive.

Constraints:

  • If supplied, must match an existing DB shard group identifier.

", "DescribeDBShardGroupsMessage$Marker": "

An optional pagination token provided by a previous DescribeDBShardGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

", "DescribeDBShardGroupsResponse$Marker": "

A pagination token that can be used in a later DescribeDBClusters request.

", "DescribeDBSnapshotAttributesMessage$DBSnapshotIdentifier": "

The identifier for the DB snapshot to describe the attributes for.

", @@ -5329,7 +5336,6 @@ "ModifyDBRecommendationMessage$RecommendationId": "

The identifier of the recommendation to update.

", "ModifyDBRecommendationMessage$Locale": "

The language of the modified recommendation.

", "ModifyDBRecommendationMessage$Status": "

The recommendation status to update.

Valid values:

  • active

  • dismissed

", - "ModifyDBShardGroupMessage$DBShardGroupIdentifier": "

The name of the DB shard group to modify.

", "ModifyDBSnapshotAttributeMessage$DBSnapshotIdentifier": "

The identifier for the DB snapshot to modify the attributes for.

", "ModifyDBSnapshotAttributeMessage$AttributeName": "

The name of the DB snapshot attribute to modify.

To manage authorization for other Amazon Web Services accounts to copy or restore a manual DB snapshot, set this value to restore.

To view the list of attributes available to modify, use the DescribeDBSnapshotAttributes API operation.

", "ModifyDBSnapshotMessage$DBSnapshotIdentifier": "

The identifier of the DB snapshot to modify.

", @@ -5432,7 +5438,6 @@ "ReadersArnList$member": null, "RebootDBClusterMessage$DBClusterIdentifier": "

The DB cluster identifier. This parameter is stored as a lowercase string.

Constraints:

  • Must match the identifier of an existing DBCluster.

", "RebootDBInstanceMessage$DBInstanceIdentifier": "

The DB instance identifier. This parameter is stored as a lowercase string.

Constraints:

  • Must match the identifier of an existing DBInstance.

", - "RebootDBShardGroupMessage$DBShardGroupIdentifier": "

The name of the DB shard group to reboot.

", "RecommendedAction$ActionId": "

The unique identifier of the recommended action.

", "RecommendedAction$Title": "

A short description to summarize the action. The description might contain markdown.

", "RecommendedAction$Description": "

A detailed description of the action. The description might contain markdown.

", diff --git a/apis/rum/2018-05-10/docs-2.json b/apis/rum/2018-05-10/docs-2.json index 76794d81ba7..65124f74e88 100644 --- a/apis/rum/2018-05-10/docs-2.json +++ b/apis/rum/2018-05-10/docs-2.json @@ -2,7 +2,7 @@ "version": "2.0", "service": "

With Amazon CloudWatch RUM, you can perform real-user monitoring to collect client-side data about your web application performance from actual user sessions in real time. The data collected includes page load times, client-side errors, and user behavior. When you view this data, you can see it all aggregated together and also see breakdowns by the browsers and devices that your customers use.

You can use the collected data to quickly identify and debug client-side performance issues. CloudWatch RUM helps you visualize anomalies in your application performance and find relevant debugging data such as error messages, stack traces, and user sessions. You can also use RUM to understand the range of end-user impact including the number of users, geolocations, and browsers used.

", "operations": { - "BatchCreateRumMetricDefinitions": "

Specifies the extended metrics and custom metrics that you want a CloudWatch RUM app monitor to send to a destination. Valid destinations include CloudWatch and Evidently.

By default, RUM app monitors send some metrics to CloudWatch. These default metrics are listed in CloudWatch metrics that you can collect with CloudWatch RUM.

In addition to these default metrics, you can choose to send extended metrics or custom metrics or both.

  • Extended metrics enable you to send metrics with additional dimensions not included in the default metrics. You can also send extended metrics to Evidently as well as CloudWatch. The valid dimension names for the additional dimensions for extended metrics are BrowserName, CountryCode, DeviceType, FileType, OSName, and PageId. For more information, see Extended metrics that you can send to CloudWatch and CloudWatch Evidently.

  • Custom metrics are metrics that you define. You can send custom metrics to CloudWatch or to CloudWatch Evidently or to both. With custom metrics, you can use any metric name and namespace, and to derive the metrics you can use any custom events, built-in events, custom attributes, or default attributes.

    You can't send custom metrics to the AWS/RUM namespace. You must send custom metrics to a custom namespace that you define. The namespace that you use can't start with AWS/. CloudWatch RUM prepends RUM/CustomMetrics/ to the custom namespace that you define, so the final namespace for your metrics in CloudWatch is RUM/CustomMetrics/your-custom-namespace .

The maximum number of metric definitions that you can specify in one BatchCreateRumMetricDefinitions operation is 200.

The maximum number of metric definitions that one destination can contain is 2000.

Extended metrics sent to CloudWatch and RUM custom metrics are charged as CloudWatch custom metrics. Each combination of additional dimension name and dimension value counts as a custom metric. For more information, see Amazon CloudWatch Pricing.

You must have already created a destination for the metrics before you send them. For more information, see PutRumMetricsDestination.

If some metric definitions specified in a BatchCreateRumMetricDefinitions operations are not valid, those metric definitions fail and return errors, but all valid metric definitions in the same operation still succeed.

", + "BatchCreateRumMetricDefinitions": "

Specifies the extended metrics and custom metrics that you want a CloudWatch RUM app monitor to send to a destination. Valid destinations include CloudWatch and Evidently.

By default, RUM app monitors send some metrics to CloudWatch. These default metrics are listed in CloudWatch metrics that you can collect with CloudWatch RUM.

In addition to these default metrics, you can choose to send extended metrics, custom metrics, or both.

  • Extended metrics let you send metrics with additional dimensions that aren't included in the default metrics. You can also send extended metrics to both Evidently and CloudWatch. The valid dimension names for the additional dimensions for extended metrics are BrowserName, CountryCode, DeviceType, FileType, OSName, and PageId. For more information, see Extended metrics that you can send to CloudWatch and CloudWatch Evidently.

  • Custom metrics are metrics that you define. You can send custom metrics to CloudWatch, CloudWatch Evidently, or both. With custom metrics, you can use any metric name and namespace. To derive the metrics, you can use any custom events, built-in events, custom attributes, or default attributes.

    You can't send custom metrics to the AWS/RUM namespace. You must send custom metrics to a custom namespace that you define. The namespace that you use can't start with AWS/. CloudWatch RUM prepends RUM/CustomMetrics/ to the custom namespace that you define, so the final namespace for your metrics in CloudWatch is RUM/CustomMetrics/your-custom-namespace .

The maximum number of metric definitions that you can specify in one BatchCreateRumMetricDefinitions operation is 200.

The maximum number of metric definitions that one destination can contain is 2000.

Extended metrics sent to CloudWatch and RUM custom metrics are charged as CloudWatch custom metrics. Each combination of additional dimension name and dimension value counts as a custom metric. For more information, see Amazon CloudWatch Pricing.

You must have already created a destination for the metrics before you send them. For more information, see PutRumMetricsDestination.

If some metric definitions specified in a BatchCreateRumMetricDefinitions operations are not valid, those metric definitions fail and return errors, but all valid metric definitions in the same operation still succeed.

", "BatchDeleteRumMetricDefinitions": "

Removes the specified metrics from being sent to an extended metrics destination.

If some metric definition IDs specified in a BatchDeleteRumMetricDefinitions operations are not valid, those metric definitions fail and return errors, but all valid metric definition IDs in the same operation are still deleted.

The maximum number of metric definitions that you can specify in one BatchDeleteRumMetricDefinitions operation is 200.

", "BatchGetRumMetricDefinitions": "

Retrieves the list of metrics and dimensions that a RUM app monitor is sending to a single destination.

", "CreateAppMonitor": "

Creates an Amazon CloudWatch RUM app monitor, which collects telemetry data from your application and sends that data to RUM. The data includes performance and reliability information such as page load time, client-side errors, and user behavior.

You use this operation only to create a new app monitor. To update an existing app monitor, use UpdateAppMonitor instead.

After you create an app monitor, sign in to the CloudWatch RUM console to get the JavaScript code snippet to add to your web application. For more information, see How do I find a code snippet that I've already generated?

", @@ -36,8 +36,8 @@ "base": "

This structure contains much of the configuration data for the app monitor.

", "refs": { "AppMonitor$AppMonitorConfiguration": "

A structure that contains much of the configuration data for the app monitor.

", - "CreateAppMonitorRequest$AppMonitorConfiguration": "

A structure that contains much of the configuration data for the app monitor. If you are using Amazon Cognito for authorization, you must include this structure in your request, and it must include the ID of the Amazon Cognito identity pool to use for authorization. If you don't include AppMonitorConfiguration, you must set up your own authorization method. For more information, see Authorize your application to send data to Amazon Web Services.

If you omit this argument, the sample rate used for RUM is set to 10% of the user sessions.

", - "UpdateAppMonitorRequest$AppMonitorConfiguration": "

A structure that contains much of the configuration data for the app monitor. If you are using Amazon Cognito for authorization, you must include this structure in your request, and it must include the ID of the Amazon Cognito identity pool to use for authorization. If you don't include AppMonitorConfiguration, you must set up your own authorization method. For more information, see Authorize your application to send data to Amazon Web Services.

" + "CreateAppMonitorRequest$AppMonitorConfiguration": "

A structure that contains much of the configuration data for the app monitor. If you are using Amazon Cognito for authorization, you must include this structure in your request, and it must include the ID of the Amazon Cognito identity pool to use for authorization. If you don't include AppMonitorConfiguration, you must set up your own authorization method. For more information, see Authorize your application to send data to Amazon Web Services.

If you omit this argument, the sample rate used for RUM is set to 10% of the user sessions.

", + "UpdateAppMonitorRequest$AppMonitorConfiguration": "

A structure that contains much of the configuration data for the app monitor. If you are using Amazon Cognito for authorization, you must include this structure in your request, and it must include the ID of the Amazon Cognito identity pool to use for authorization. If you don't include AppMonitorConfiguration, you must set up your own authorization method. For more information, see Authorize your application to send data to Amazon Web Services.

" } }, "AppMonitorDetails": { @@ -96,7 +96,7 @@ "Arn": { "base": null, "refs": { - "AppMonitorConfiguration$GuestRoleArn": "

The ARN of the guest IAM role that is attached to the Amazon Cognito identity pool that is used to authorize the sending of data to RUM.

", + "AppMonitorConfiguration$GuestRoleArn": "

The ARN of the guest IAM role that is attached to the Amazon Cognito identity pool that is used to authorize the sending of data to RUM.

It is possible that an app monitor does not have a value for GuestRoleArn. For example, this can happen when you use the console to create an app monitor and you allow CloudWatch RUM to create a new identity pool for Authorization. In this case, GuestRoleArn is not present in the GetAppMonitor response because it is not stored by the service.

If this issue affects you, you can take one of the following steps:

  • Use the Cloud Development Kit (CDK) to create an identity pool and the associated IAM role, and use that for your app monitor.

  • Make a separate GetIdentityPoolRoles call to Amazon Cognito to retrieve the GuestRoleArn.

", "ListTagsForResourceRequest$ResourceArn": "

The ARN of the resource that you want to see the tags of.

", "ListTagsForResourceResponse$ResourceArn": "

The ARN of the resource that you are viewing.

", "TagResourceRequest$ResourceArn": "

The ARN of the CloudWatch RUM resource that you're adding tags to.

", @@ -275,7 +275,7 @@ "base": null, "refs": { "MetricDefinition$EventPattern": "

The pattern that defines the metric. RUM checks events that happen in a user's session against the pattern, and events that match the pattern are sent to the metric destination.

If the metrics destination is CloudWatch and the event also matches a value in DimensionKeys, then the metric is published with the specified dimensions.

", - "MetricDefinitionRequest$EventPattern": "

The pattern that defines the metric, specified as a JSON object. RUM checks events that happen in a user's session against the pattern, and events that match the pattern are sent to the metric destination.

When you define extended metrics, the metric definition is not valid if EventPattern is omitted.

Example event patterns:

  • '{ \"event_type\": [\"com.amazon.rum.js_error_event\"], \"metadata\": { \"browserName\": [ \"Chrome\", \"Safari\" ], } }'

  • '{ \"event_type\": [\"com.amazon.rum.performance_navigation_event\"], \"metadata\": { \"browserName\": [ \"Chrome\", \"Firefox\" ] }, \"event_details\": { \"duration\": [{ \"numeric\": [ \"<\", 2000 ] }] } }'

  • '{ \"event_type\": [\"com.amazon.rum.performance_navigation_event\"], \"metadata\": { \"browserName\": [ \"Chrome\", \"Safari\" ], \"countryCode\": [ \"US\" ] }, \"event_details\": { \"duration\": [{ \"numeric\": [ \">=\", 2000, \"<\", 8000 ] }] } }'

If the metrics destination' is CloudWatch and the event also matches a value in DimensionKeys, then the metric is published with the specified dimensions.

" + "MetricDefinitionRequest$EventPattern": "

The pattern that defines the metric, specified as a JSON object. RUM checks events that happen in a user's session against the pattern, and events that match the pattern are sent to the metric destination.

When you define extended metrics, the metric definition is not valid if EventPattern is omitted.

Example event patterns:

  • '{ \"event_type\": [\"com.amazon.rum.js_error_event\"], \"metadata\": { \"browserName\": [ \"Chrome\", \"Safari\" ], } }'

  • '{ \"event_type\": [\"com.amazon.rum.performance_navigation_event\"], \"metadata\": { \"browserName\": [ \"Chrome\", \"Firefox\" ] }, \"event_details\": { \"duration\": [{ \"numeric\": [ \"<\", 2000 ] }] } }'

  • '{ \"event_type\": [\"com.amazon.rum.performance_navigation_event\"], \"metadata\": { \"browserName\": [ \"Chrome\", \"Safari\" ], \"countryCode\": [ \"US\" ] }, \"event_details\": { \"duration\": [{ \"numeric\": [ \">=\", 2000, \"<\", 8000 ] }] } }'

If the metrics destination is CloudWatch and the event also matches a value in DimensionKeys, then the metric is published with the specified dimensions.

" } }, "FavoritePages": { @@ -317,7 +317,7 @@ "base": null, "refs": { "MetricDestinationSummary$IamRoleArn": "

This field appears only when the destination is Evidently. It specifies the ARN of the IAM role that is used to write to the Evidently experiment that receives the metrics.

", - "PutRumMetricsDestinationRequest$IamRoleArn": "

This parameter is required if Destination is Evidently. If Destination is CloudWatch, do not use this parameter.

This parameter specifies the ARN of an IAM role that RUM will assume to write to the Evidently experiment that you are sending metrics to. This role must have permission to write to that experiment.

" + "PutRumMetricsDestinationRequest$IamRoleArn": "

This parameter is required if Destination is Evidently. If Destination is CloudWatch, don't use this parameter.

This parameter specifies the ARN of an IAM role that RUM will assume to write to the Evidently experiment that you are sending metrics to. This role must have permission to write to that experiment.

If you specify this parameter, you must be signed on to a role that has PassRole permissions attached to it, to allow the role to be passed. The CloudWatchAmazonCloudWatchRUMFullAccess policy doesn't include PassRole permissions.

" } }, "IdentityPoolId": { @@ -412,7 +412,7 @@ } }, "MetricDefinitionRequest": { - "base": "

Use this structure to define one extended metric or custom metric that RUM will send to CloudWatch or CloudWatch Evidently. For more information, see Additional metrics that you can send to CloudWatch and CloudWatch Evidently.

This structure is validated differently for extended metrics and custom metrics. For extended metrics that are sent to the AWS/RUM namespace, the following validations apply:

  • The Namespace parameter must be omitted or set to AWS/RUM.

  • Only certain combinations of values for Name, ValueKey, and EventPattern are valid. In addition to what is displayed in the list below, the EventPattern can also include information used by the DimensionKeys field.

    • If Name is PerformanceNavigationDuration, then ValueKeymust be event_details.duration and the EventPattern must include {\"event_type\":[\"com.amazon.rum.performance_navigation_event\"]}

    • If Name is PerformanceResourceDuration, then ValueKeymust be event_details.duration and the EventPattern must include {\"event_type\":[\"com.amazon.rum.performance_resource_event\"]}

    • If Name is NavigationSatisfiedTransaction, then ValueKeymust be null and the EventPattern must include { \"event_type\": [\"com.amazon.rum.performance_navigation_event\"], \"event_details\": { \"duration\": [{ \"numeric\": [\">\",2000] }] } }

    • If Name is NavigationToleratedTransaction, then ValueKeymust be null and the EventPattern must include { \"event_type\": [\"com.amazon.rum.performance_navigation_event\"], \"event_details\": { \"duration\": [{ \"numeric\": [\">=\",2000,\"<\"8000] }] } }

    • If Name is NavigationFrustratedTransaction, then ValueKeymust be null and the EventPattern must include { \"event_type\": [\"com.amazon.rum.performance_navigation_event\"], \"event_details\": { \"duration\": [{ \"numeric\": [\">=\",8000] }] } }

    • If Name is WebVitalsCumulativeLayoutShift, then ValueKeymust be event_details.value and the EventPattern must include {\"event_type\":[\"com.amazon.rum.cumulative_layout_shift_event\"]}

    • If Name is WebVitalsFirstInputDelay, then ValueKeymust be event_details.value and the EventPattern must include {\"event_type\":[\"com.amazon.rum.first_input_delay_event\"]}

    • If Name is WebVitalsLargestContentfulPaint, then ValueKeymust be event_details.value and the EventPattern must include {\"event_type\":[\"com.amazon.rum.largest_contentful_paint_event\"]}

    • If Name is JsErrorCount, then ValueKeymust be null and the EventPattern must include {\"event_type\":[\"com.amazon.rum.js_error_event\"]}

    • If Name is HttpErrorCount, then ValueKeymust be null and the EventPattern must include {\"event_type\":[\"com.amazon.rum.http_event\"]}

    • If Name is SessionCount, then ValueKeymust be null and the EventPattern must include {\"event_type\":[\"com.amazon.rum.session_start_event\"]}

For custom metrics, the following validation rules apply:

  • The namespace can't be omitted and can't be AWS/RUM. You can use the AWS/RUM namespace only for extended metrics.

  • All dimensions listed in the DimensionKeys field must be present in the value of EventPattern.

  • The values that you specify for ValueKey, EventPattern, and DimensionKeys must be fields in RUM events, so all first-level keys in these fields must be one of the keys in the list later in this section.

  • If you set a value for EventPattern, it must be a JSON object.

  • For every non-empty event_details, there must be a non-empty event_type.

  • If EventPattern contains an event_details field, it must also contain an event_type. For every built-in event_type that you use, you must use a value for event_details that corresponds to that event_type. For information about event details that correspond to event types, see RUM event details.

  • In EventPattern, any JSON array must contain only one value.

Valid key values for first-level keys in the ValueKey, EventPattern, and DimensionKeys fields:

  • account_id

  • application_Id

  • application_version

  • application_name

  • batch_id

  • event_details

  • event_id

  • event_interaction

  • event_timestamp

  • event_type

  • event_version

  • log_stream

  • metadata

  • sessionId

  • user_details

  • userId

", + "base": "

Use this structure to define one extended metric or custom metric that RUM will send to CloudWatch or CloudWatch Evidently. For more information, see Custom metrics and extended metrics that you can send to CloudWatch and CloudWatch Evidently.

This structure is validated differently for extended metrics and custom metrics. For extended metrics that are sent to the AWS/RUM namespace, the following validations apply:

  • The Namespace parameter must be omitted or set to AWS/RUM.

  • Only certain combinations of values for Name, ValueKey, and EventPattern are valid. In addition to what is displayed in the following list, the EventPattern can also include information used by the DimensionKeys field.

    • If Name is PerformanceNavigationDuration, then ValueKey must be event_details.duration and the EventPattern must include {\"event_type\":[\"com.amazon.rum.performance_navigation_event\"]}

    • If Name is PerformanceResourceDuration, then ValueKey must be event_details.duration and the EventPattern must include {\"event_type\":[\"com.amazon.rum.performance_resource_event\"]}

    • If Name is NavigationSatisfiedTransaction, then ValueKey must be null and the EventPattern must include { \"event_type\": [\"com.amazon.rum.performance_navigation_event\"], \"event_details\": { \"duration\": [{ \"numeric\": [\">\",2000] }] } }

    • If Name is NavigationToleratedTransaction, then ValueKey must be null and the EventPattern must include { \"event_type\": [\"com.amazon.rum.performance_navigation_event\"], \"event_details\": { \"duration\": [{ \"numeric\": [\">=\",2000,\"<\",8000] }] } }

    • If Name is NavigationFrustratedTransaction, then ValueKey must be null and the EventPattern must include { \"event_type\": [\"com.amazon.rum.performance_navigation_event\"], \"event_details\": { \"duration\": [{ \"numeric\": [\">=\",8000] }] } }

    • If Name is WebVitalsCumulativeLayoutShift, then ValueKey must be event_details.value and the EventPattern must include {\"event_type\":[\"com.amazon.rum.cumulative_layout_shift_event\"]}

    • If Name is WebVitalsFirstInputDelay, then ValueKey must be event_details.value and the EventPattern must include {\"event_type\":[\"com.amazon.rum.first_input_delay_event\"]}

    • If Name is WebVitalsLargestContentfulPaint, then ValueKey must be event_details.value and the EventPattern must include {\"event_type\":[\"com.amazon.rum.largest_contentful_paint_event\"]}

    • If Name is JsErrorCount, then ValueKey must be null and the EventPattern must include {\"event_type\":[\"com.amazon.rum.js_error_event\"]}

    • If Name is HttpErrorCount, then ValueKey must be null and the EventPattern must include {\"event_type\":[\"com.amazon.rum.http_event\"]}

    • If Name is SessionCount, then ValueKey must be null and the EventPattern must include {\"event_type\":[\"com.amazon.rum.session_start_event\"]}

    • If Name is PageViewCount, then ValueKey must be null and the EventPattern must include {\"event_type\":[\"com.amazon.rum.page_view_event\"]}

    • If Name is Http4xxCount, then ValueKey must be null and the EventPattern must include {\"event_type\": [\"com.amazon.rum.http_event\"],\"event_details\":{\"response\":{\"status\":[{\"numeric\":[\">=\",400,\"<\",500]}]}}}

    • If Name is Http5xxCount, then ValueKey must be null and the EventPattern must include {\"event_type\": [\"com.amazon.rum.http_event\"],\"event_details\":{\"response\":{\"status\":[{\"numeric\":[\">=\",500,\"<=\",599]}]}}}

For custom metrics, the following validation rules apply:

  • The namespace can't be omitted and can't be AWS/RUM. You can use the AWS/RUM namespace only for extended metrics.

  • All dimensions listed in the DimensionKeys field must be present in the value of EventPattern.

  • The values that you specify for ValueKey, EventPattern, and DimensionKeys must be fields in RUM events, so all first-level keys in these fields must be one of the keys in the list later in this section.

  • If you set a value for EventPattern, it must be a JSON object.

  • For every non-empty event_details, there must be a non-empty event_type.

  • If EventPattern contains an event_details field, it must also contain an event_type. For every built-in event_type that you use, you must use a value for event_details that corresponds to that event_type. For information about event details that correspond to event types, see RUM event details.

  • In EventPattern, any JSON array must contain only one value.

Valid key values for first-level keys in the ValueKey, EventPattern, and DimensionKeys fields:

  • account_id

  • application_Id

  • application_version

  • application_name

  • batch_id

  • event_details

  • event_id

  • event_interaction

  • event_timestamp

  • event_type

  • event_version

  • log_stream

  • metadata

  • sessionId

  • user_details

  • userId

", "refs": { "BatchCreateRumMetricDefinitionsError$MetricDefinition": "

The metric definition that caused this error.

", "MetricDefinitionsRequest$member": null, @@ -435,7 +435,7 @@ "MetricDestination": { "base": null, "refs": { - "BatchCreateRumMetricDefinitionsRequest$Destination": "

The destination to send the metrics to. Valid values are CloudWatch and Evidently. If you specify Evidently, you must also specify the ARN of the CloudWatchEvidently experiment that will receive the metrics and an IAM role that has permission to write to the experiment.

", + "BatchCreateRumMetricDefinitionsRequest$Destination": "

The destination to send the metrics to. Valid values are CloudWatch and Evidently. If you specify Evidently, you must also specify the Amazon Resource Name (ARN) of the CloudWatchEvidently experiment that will receive the metrics and an IAM role that has permission to write to the experiment.

", "BatchDeleteRumMetricDefinitionsRequest$Destination": "

Defines the destination where you want to stop sending the specified metrics. Valid values are CloudWatch and Evidently. If you specify Evidently, you must also specify the ARN of the CloudWatchEvidently experiment that is to be the destination and an IAM role that has permission to write to the experiment.

", "BatchGetRumMetricDefinitionsRequest$Destination": "

The type of destination that you want to view metrics for. Valid values are CloudWatch and Evidently.

", "DeleteRumMetricsDestinationRequest$Destination": "

The type of destination to delete. Valid values are CloudWatch and Evidently.

", @@ -765,7 +765,7 @@ "base": null, "refs": { "MetricDefinition$ValueKey": "

The field within the event object that the metric value is sourced from.

", - "MetricDefinitionRequest$ValueKey": "

The field within the event object that the metric value is sourced from.

If you omit this field, a hardcoded value of 1 is pushed as the metric value. This is useful if you just want to count the number of events that the filter catches.

If this metric is sent to CloudWatch Evidently, this field will be passed to Evidently raw and Evidently will handle data extraction from the event.

" + "MetricDefinitionRequest$ValueKey": "

The field within the event object that the metric value is sourced from.

If you omit this field, a hardcoded value of 1 is pushed as the metric value. This is useful if you want to count the number of events that the filter catches.

If this metric is sent to CloudWatch Evidently, this field will be passed to Evidently raw. Evidently will handle data extraction from the event.

" } } } diff --git a/apis/rum/2018-05-10/endpoint-rule-set-1.json b/apis/rum/2018-05-10/endpoint-rule-set-1.json index cfb04c37f30..a05b4f3a089 100644 --- a/apis/rum/2018-05-10/endpoint-rule-set-1.json +++ b/apis/rum/2018-05-10/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff 
--git a/gems/aws-sdk-appsync/CHANGELOG.md b/gems/aws-sdk-appsync/CHANGELOG.md index 8f3d58a655c..2c45951fd32 100644 --- a/gems/aws-sdk-appsync/CHANGELOG.md +++ b/gems/aws-sdk-appsync/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.74.0 (2024-02-23) +------------------ + +* Feature - Documentation only updates for AppSync + 1.73.0 (2024-02-12) ------------------ diff --git a/gems/aws-sdk-appsync/VERSION b/gems/aws-sdk-appsync/VERSION index 5e3a4256626..dc87e8af82f 100644 --- a/gems/aws-sdk-appsync/VERSION +++ b/gems/aws-sdk-appsync/VERSION @@ -1 +1 @@ -1.73.0 +1.74.0 diff --git a/gems/aws-sdk-appsync/lib/aws-sdk-appsync.rb b/gems/aws-sdk-appsync/lib/aws-sdk-appsync.rb index 058bb9127a6..4a584f6d2b7 100644 --- a/gems/aws-sdk-appsync/lib/aws-sdk-appsync.rb +++ b/gems/aws-sdk-appsync/lib/aws-sdk-appsync.rb @@ -52,6 +52,6 @@ # @!group service module Aws::AppSync - GEM_VERSION = '1.73.0' + GEM_VERSION = '1.74.0' end diff --git a/gems/aws-sdk-appsync/lib/aws-sdk-appsync/client.rb b/gems/aws-sdk-appsync/lib/aws-sdk-appsync/client.rb index 3d72b3111bd..a3339ba440d 100644 --- a/gems/aws-sdk-appsync/lib/aws-sdk-appsync/client.rb +++ b/gems/aws-sdk-appsync/lib/aws-sdk-appsync/client.rb @@ -615,11 +615,13 @@ def associate_source_graphql_api(params = {}, options = {}) # Controls how cache health metrics will be emitted to CloudWatch. Cache # health metrics include: # - # * NetworkBandwidthOutAllowanceExceeded: The number of times a - # specified GraphQL operation was called. + # * NetworkBandwidthOutAllowanceExceeded: The network packets dropped + # because the throughput exceeded the aggregated bandwidth limit. This + # is useful for diagnosing bottlenecks in a cache configuration. # - # * EngineCPUUtilization: The number of GraphQL errors that occurred - # during a specified GraphQL operation. + # * EngineCPUUtilization: The CPU utilization (percentage) allocated to + # the Redis process. 
This is useful for diagnosing bottlenecks in a + # cache configuration. # # Metrics will be recorded by API ID. You can set the value to `ENABLED` # or `DISABLED`. @@ -3391,11 +3393,13 @@ def untag_resource(params = {}, options = {}) # Controls how cache health metrics will be emitted to CloudWatch. Cache # health metrics include: # - # * NetworkBandwidthOutAllowanceExceeded: The number of times a - # specified GraphQL operation was called. + # * NetworkBandwidthOutAllowanceExceeded: The network packets dropped + # because the throughput exceeded the aggregated bandwidth limit. This + # is useful for diagnosing bottlenecks in a cache configuration. # - # * EngineCPUUtilization: The number of GraphQL errors that occurred - # during a specified GraphQL operation. + # * EngineCPUUtilization: The CPU utilization (percentage) allocated to + # the Redis process. This is useful for diagnosing bottlenecks in a + # cache configuration. # # Metrics will be recorded by API ID. You can set the value to `ENABLED` # or `DISABLED`. @@ -4239,7 +4243,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-appsync' - context[:gem_version] = '1.73.0' + context[:gem_version] = '1.74.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-appsync/lib/aws-sdk-appsync/types.rb b/gems/aws-sdk-appsync/lib/aws-sdk-appsync/types.rb index 5efddfb4235..2404c1b4ac3 100644 --- a/gems/aws-sdk-appsync/lib/aws-sdk-appsync/types.rb +++ b/gems/aws-sdk-appsync/lib/aws-sdk-appsync/types.rb @@ -749,11 +749,14 @@ class ConcurrentModificationException < Struct.new( # Controls how cache health metrics will be emitted to CloudWatch. # Cache health metrics include: # - # * NetworkBandwidthOutAllowanceExceeded: The number of times a - # specified GraphQL operation was called. + # * NetworkBandwidthOutAllowanceExceeded: The network packets dropped + # because the throughput exceeded the aggregated bandwidth limit. 
+ # This is useful for diagnosing bottlenecks in a cache + # configuration. # - # * EngineCPUUtilization: The number of GraphQL errors that occurred - # during a specified GraphQL operation. + # * EngineCPUUtilization: The CPU utilization (percentage) allocated + # to the Redis process. This is useful for diagnosing bottlenecks in + # a cache configuration. # # Metrics will be recorded by API ID. You can set the value to # `ENABLED` or `DISABLED`. @@ -2004,7 +2007,7 @@ class ElasticsearchDataSourceConfig < Struct.new( # for all resolvers in the request. # # * `PER_RESOLVER_METRICS`: Records and emits metric data for - # resolvers that have the `metricConfig` value set to `ENABLED`. + # resolvers that have the `metricsConfig` value set to `ENABLED`. # # 2. `dataSourceLevelMetricsBehavior`: Controls how data source metrics # will be emitted to CloudWatch. Data source metrics include: @@ -2026,7 +2029,7 @@ class ElasticsearchDataSourceConfig < Struct.new( # data for all data sources in the request. # # * `PER_DATA_SOURCE_METRICS`: Records and emits metric data for - # data sources that have the `metricConfig` value set to + # data sources that have the `metricsConfig` value set to # `ENABLED`. # # 3. `operationLevelMetricsConfig`: Controls how operation metrics will @@ -2065,7 +2068,7 @@ class ElasticsearchDataSourceConfig < Struct.new( # all resolvers in the request. # # * `PER_RESOLVER_METRICS`: Records and emits metric data for - # resolvers that have the `metricConfig` value set to `ENABLED`. + # resolvers that have the `metricsConfig` value set to `ENABLED`. # @return [String] # # @!attribute [rw] data_source_level_metrics_behavior @@ -2088,7 +2091,7 @@ class ElasticsearchDataSourceConfig < Struct.new( # for all data sources in the request. # # * `PER_DATA_SOURCE_METRICS`: Records and emits metric data for data - # sources that have the `metricConfig` value set to `ENABLED`. + # sources that have the `metricsConfig` value set to `ENABLED`. 
# @return [String] # # @!attribute [rw] operation_level_metrics_config @@ -4400,11 +4403,14 @@ class UntagResourceResponse < Aws::EmptyStructure; end # Controls how cache health metrics will be emitted to CloudWatch. # Cache health metrics include: # - # * NetworkBandwidthOutAllowanceExceeded: The number of times a - # specified GraphQL operation was called. + # * NetworkBandwidthOutAllowanceExceeded: The network packets dropped + # because the throughput exceeded the aggregated bandwidth limit. + # This is useful for diagnosing bottlenecks in a cache + # configuration. # - # * EngineCPUUtilization: The number of GraphQL errors that occurred - # during a specified GraphQL operation. + # * EngineCPUUtilization: The CPU utilization (percentage) allocated + # to the Redis process. This is useful for diagnosing bottlenecks in + # a cache configuration. # # Metrics will be recorded by API ID. You can set the value to # `ENABLED` or `DISABLED`. diff --git a/gems/aws-sdk-cloudwatchrum/CHANGELOG.md b/gems/aws-sdk-cloudwatchrum/CHANGELOG.md index a7aa32c9291..7172e077e73 100644 --- a/gems/aws-sdk-cloudwatchrum/CHANGELOG.md +++ b/gems/aws-sdk-cloudwatchrum/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.20.0 (2024-02-23) +------------------ + +* Feature - Doc-only update for new RUM metrics that were added + 1.19.0 (2024-01-26) ------------------ diff --git a/gems/aws-sdk-cloudwatchrum/VERSION b/gems/aws-sdk-cloudwatchrum/VERSION index 815d5ca06d5..39893559155 100644 --- a/gems/aws-sdk-cloudwatchrum/VERSION +++ b/gems/aws-sdk-cloudwatchrum/VERSION @@ -1 +1 @@ -1.19.0 +1.20.0 diff --git a/gems/aws-sdk-cloudwatchrum/lib/aws-sdk-cloudwatchrum.rb b/gems/aws-sdk-cloudwatchrum/lib/aws-sdk-cloudwatchrum.rb index d53ff6655e7..d23b135497c 100644 --- a/gems/aws-sdk-cloudwatchrum/lib/aws-sdk-cloudwatchrum.rb +++ b/gems/aws-sdk-cloudwatchrum/lib/aws-sdk-cloudwatchrum.rb @@ -52,6 +52,6 @@ # @!group service module Aws::CloudWatchRUM - GEM_VERSION = '1.19.0' + 
GEM_VERSION = '1.20.0' end diff --git a/gems/aws-sdk-cloudwatchrum/lib/aws-sdk-cloudwatchrum/client.rb b/gems/aws-sdk-cloudwatchrum/lib/aws-sdk-cloudwatchrum/client.rb index b740d44a269..0bf59dfd1ca 100644 --- a/gems/aws-sdk-cloudwatchrum/lib/aws-sdk-cloudwatchrum/client.rb +++ b/gems/aws-sdk-cloudwatchrum/lib/aws-sdk-cloudwatchrum/client.rb @@ -397,21 +397,21 @@ def initialize(*args) # with CloudWatch RUM][1]. # # In addition to these default metrics, you can choose to send extended - # metrics or custom metrics or both. + # metrics, custom metrics, or both. # - # * Extended metrics enable you to send metrics with additional - # dimensions not included in the default metrics. You can also send - # extended metrics to Evidently as well as CloudWatch. The valid + # * Extended metrics let you send metrics with additional dimensions + # that aren't included in the default metrics. You can also send + # extended metrics to both Evidently and CloudWatch. The valid # dimension names for the additional dimensions for extended metrics # are `BrowserName`, `CountryCode`, `DeviceType`, `FileType`, # `OSName`, and `PageId`. For more information, see [ Extended metrics # that you can send to CloudWatch and CloudWatch Evidently][2]. # # * Custom metrics are metrics that you define. You can send custom - # metrics to CloudWatch or to CloudWatch Evidently or to both. With - # custom metrics, you can use any metric name and namespace, and to - # derive the metrics you can use any custom events, built-in events, - # custom attributes, or default attributes. + # metrics to CloudWatch. CloudWatch Evidently, or both. With custom + # metrics, you can use any metric name and namespace. To derive the + # metrics, you can use any custom events, built-in events, custom + # attributes, or default attributes. # # You can't send custom metrics to the `AWS/RUM` namespace. You must # send custom metrics to a custom namespace that you define. 
The @@ -453,9 +453,9 @@ def initialize(*args) # @option params [required, String] :destination # The destination to send the metrics to. Valid values are `CloudWatch` # and `Evidently`. If you specify `Evidently`, you must also specify the - # ARN of the CloudWatchEvidently experiment that will receive the - # metrics and an IAM role that has permission to write to the - # experiment. + # Amazon Resource Name (ARN) of the CloudWatchEvidently experiment that + # will receive the metrics and an IAM role that has permission to write + # to the experiment. # # @option params [String] :destination_arn # This parameter is required if `Destination` is `Evidently`. If @@ -694,7 +694,7 @@ def batch_get_rum_metric_definitions(params = {}, options = {}) # # # - # [1]: https://docs.aws.amazon.com/monitoring/CloudWatch-RUM-get-started-authorization.html + # [1]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-RUM-get-started-authorization.html # # @option params [Types::CustomEvents] :custom_events # Specifies whether this app monitor allows the web client to define and @@ -1177,12 +1177,22 @@ def put_rum_events(params = {}, options = {}) # # @option params [String] :iam_role_arn # This parameter is required if `Destination` is `Evidently`. If - # `Destination` is `CloudWatch`, do not use this parameter. + # `Destination` is `CloudWatch`, don't use this parameter. # # This parameter specifies the ARN of an IAM role that RUM will assume # to write to the Evidently experiment that you are sending metrics to. # This role must have permission to write to that experiment. # + # If you specify this parameter, you must be signed on to a role that + # has [PassRole][1] permissions attached to it, to allow the role to be + # passed. The [ CloudWatchAmazonCloudWatchRUMFullAccess][2] policy + # doesn't include `PassRole` permissions. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html + # [2]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/auth-and-access-control-cw.html#managed-policies-cloudwatch-RUM + # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. # # @example Request syntax with placeholder values @@ -1314,7 +1324,7 @@ def untag_resource(params = {}, options = {}) # # # - # [1]: https://docs.aws.amazon.com/monitoring/CloudWatch-RUM-get-started-authorization.html + # [1]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-RUM-get-started-authorization.html # # @option params [Types::CustomEvents] :custom_events # Specifies whether this app monitor allows the web client to define and @@ -1454,7 +1464,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-cloudwatchrum' - context[:gem_version] = '1.19.0' + context[:gem_version] = '1.20.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-cloudwatchrum/lib/aws-sdk-cloudwatchrum/endpoint_provider.rb b/gems/aws-sdk-cloudwatchrum/lib/aws-sdk-cloudwatchrum/endpoint_provider.rb index 572f0891ded..5f25a28f0a6 100644 --- a/gems/aws-sdk-cloudwatchrum/lib/aws-sdk-cloudwatchrum/endpoint_provider.rb +++ b/gems/aws-sdk-cloudwatchrum/lib/aws-sdk-cloudwatchrum/endpoint_provider.rb @@ -32,7 +32,7 @@ def resolve_endpoint(parameters) raise ArgumentError, "FIPS and DualStack are enabled, but this partition does not support one or both" end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) - if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS")) + if Aws::Endpoints::Matchers.boolean_equals?(Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS"), true) return Aws::Endpoints::Endpoint.new(url: "https://rum-fips.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {}) end 
raise ArgumentError, "FIPS is enabled but this partition does not support FIPS" diff --git a/gems/aws-sdk-cloudwatchrum/lib/aws-sdk-cloudwatchrum/types.rb b/gems/aws-sdk-cloudwatchrum/lib/aws-sdk-cloudwatchrum/types.rb index 0323a9fb47a..c4aee3a40a1 100644 --- a/gems/aws-sdk-cloudwatchrum/lib/aws-sdk-cloudwatchrum/types.rb +++ b/gems/aws-sdk-cloudwatchrum/lib/aws-sdk-cloudwatchrum/types.rb @@ -139,6 +139,28 @@ class AppMonitor < Struct.new( # @!attribute [rw] guest_role_arn # The ARN of the guest IAM role that is attached to the Amazon Cognito # identity pool that is used to authorize the sending of data to RUM. + # + # It is possible that an app monitor does not have a value for + # `GuestRoleArn`. For example, this can happen when you use the + # console to create an app monitor and you allow CloudWatch RUM to + # create a new identity pool for Authorization. In this case, + # `GuestRoleArn` is not present in the [GetAppMonitor][1] response + # because it is not stored by the service. + # + # If this issue affects you, you can take one of the following steps: + # + # * Use the Cloud Development Kit (CDK) to create an identity pool and + # the associated IAM role, and use that for your app monitor. + # + # * Make a separate [GetIdentityPoolRoles][2] call to Amazon Cognito + # to retrieve the `GuestRoleArn`. + # + # + # + # + # + # [1]: https://docs.aws.amazon.com/cloudwatchrum/latest/APIReference/API_GetAppMonitor.html + # [2]: https://docs.aws.amazon.com/cognitoidentity/latest/APIReference/API_GetIdentityPoolRoles.html # @return [String] # # @!attribute [rw] identity_pool_id @@ -295,9 +317,9 @@ class BatchCreateRumMetricDefinitionsError < Struct.new( # @!attribute [rw] destination # The destination to send the metrics to. Valid values are # `CloudWatch` and `Evidently`. 
If you specify `Evidently`, you must - # also specify the ARN of the CloudWatchEvidently experiment that will - # receive the metrics and an IAM role that has permission to write to - # the experiment. + # also specify the Amazon Resource Name (ARN) of the + # CloudWatchEvidently experiment that will receive the metrics and an + # IAM role that has permission to write to the experiment. # @return [String] # # @!attribute [rw] destination_arn @@ -530,7 +552,7 @@ class ConflictException < Struct.new( # # # - # [1]: https://docs.aws.amazon.com/monitoring/CloudWatch-RUM-get-started-authorization.html + # [1]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-RUM-get-started-authorization.html # @return [Types::AppMonitorConfiguration] # # @!attribute [rw] custom_events @@ -997,8 +1019,8 @@ class MetricDefinition < Struct.new( # Use this structure to define one extended metric or custom metric that # RUM will send to CloudWatch or CloudWatch Evidently. For more - # information, see [ Additional metrics that you can send to CloudWatch - # and CloudWatch Evidently][1]. + # information, see [ Custom metrics and extended metrics that you can + # send to CloudWatch and CloudWatch Evidently][1]. # # This structure is validated differently for extended metrics and # custom metrics. For extended metrics that are sent to the `AWS/RUM` @@ -1008,8 +1030,8 @@ class MetricDefinition < Struct.new( # # * Only certain combinations of values for `Name`, `ValueKey`, and # `EventPattern` are valid. In addition to what is displayed in the - # list below, the `EventPattern` can also include information used by - # the `DimensionKeys` field. + # following list, the `EventPattern` can also include information used + # by the `DimensionKeys` field. 
# # * If `Name` is `PerformanceNavigationDuration`, then `ValueKey`must # be `event_details.duration` and the `EventPattern` must include @@ -1060,6 +1082,20 @@ class MetricDefinition < Struct.new( # `EventPattern` must include # `\{"event_type":["com.amazon.rum.session_start_event"]\}` # + # * If `Name` is `PageViewCount`, then `ValueKey`must be null and the + # `EventPattern` must include + # `\{"event_type":["com.amazon.rum.page_view_event"]\}` + # + # * If `Name` is `Http4xxCount`, then `ValueKey`must be null and the + # `EventPattern` must include `\{"event_type": + # ["com.amazon.rum.http_event"],"event_details":\{"response":\{"status":[\{"numeric":[">=",400,"<",500]\}]\}\}\} + # \}` + # + # * If `Name` is `Http5xxCount`, then `ValueKey`must be null and the + # `EventPattern` must include `\{"event_type": + # ["com.amazon.rum.http_event"],"event_details":\{"response":\{"status":[\{"numeric":[">=",500,"<=",599]\}]\}\}\} + # \}` + # # For custom metrics, the following validation rules apply: # # * The namespace can't be omitted and can't be `AWS/RUM`. You can use @@ -1123,7 +1159,7 @@ class MetricDefinition < Struct.new( # # # - # [1]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-RUM-vended-metrics.html + # [1]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-RUM-custom-and-extended-metrics.html # [2]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-RUM-datacollected.html#CloudWatch-RUM-datacollected-eventDetails # # @!attribute [rw] dimension_keys @@ -1175,7 +1211,7 @@ class MetricDefinition < Struct.new( # "event_details": \{ "duration": [\{ "numeric": [ ">=", 2000, "<", # 8000 ] \}] \} \}'` # - # If the metrics destination' is `CloudWatch` and the event also + # If the metrics destination is `CloudWatch` and the event also # matches a value in `DimensionKeys`, then the metric is published # with the specified dimensions. 
# @return [String] @@ -1228,12 +1264,12 @@ class MetricDefinition < Struct.new( # from. # # If you omit this field, a hardcoded value of 1 is pushed as the - # metric value. This is useful if you just want to count the number of + # metric value. This is useful if you want to count the number of # events that the filter catches. # # If this metric is sent to CloudWatch Evidently, this field will be - # passed to Evidently raw and Evidently will handle data extraction - # from the event. + # passed to Evidently raw. Evidently will handle data extraction from + # the event. # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/rum-2018-05-10/MetricDefinitionRequest AWS API Documentation @@ -1336,11 +1372,21 @@ class PutRumEventsResponse < Aws::EmptyStructure; end # # @!attribute [rw] iam_role_arn # This parameter is required if `Destination` is `Evidently`. If - # `Destination` is `CloudWatch`, do not use this parameter. + # `Destination` is `CloudWatch`, don't use this parameter. # # This parameter specifies the ARN of an IAM role that RUM will assume # to write to the Evidently experiment that you are sending metrics # to. This role must have permission to write to that experiment. + # + # If you specify this parameter, you must be signed on to a role that + # has [PassRole][1] permissions attached to it, to allow the role to + # be passed. The [ CloudWatchAmazonCloudWatchRUMFullAccess][2] policy + # doesn't include `PassRole` permissions. 
+ # + # + # + # [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html + # [2]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/auth-and-access-control-cw.html#managed-policies-cloudwatch-RUM # @return [String] # # @see http://docs.aws.amazon.com/goto/WebAPI/rum-2018-05-10/PutRumMetricsDestinationRequest AWS API Documentation @@ -1573,7 +1619,7 @@ class UntagResourceResponse < Aws::EmptyStructure; end # # # - # [1]: https://docs.aws.amazon.com/monitoring/CloudWatch-RUM-get-started-authorization.html + # [1]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-RUM-get-started-authorization.html # @return [Types::AppMonitorConfiguration] # # @!attribute [rw] custom_events diff --git a/gems/aws-sdk-qldb/CHANGELOG.md b/gems/aws-sdk-qldb/CHANGELOG.md index 2076a2c75bf..cb1dfd83610 100644 --- a/gems/aws-sdk-qldb/CHANGELOG.md +++ b/gems/aws-sdk-qldb/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.38.0 (2024-02-23) +------------------ + +* Feature - Clarify possible values for KmsKeyArn and EncryptionDescription. 
+ 1.37.0 (2024-01-26) ------------------ diff --git a/gems/aws-sdk-qldb/VERSION b/gems/aws-sdk-qldb/VERSION index bf50e910e62..ebeef2f2d61 100644 --- a/gems/aws-sdk-qldb/VERSION +++ b/gems/aws-sdk-qldb/VERSION @@ -1 +1 @@ -1.37.0 +1.38.0 diff --git a/gems/aws-sdk-qldb/lib/aws-sdk-qldb.rb b/gems/aws-sdk-qldb/lib/aws-sdk-qldb.rb index 4a281bead55..68d877319ee 100644 --- a/gems/aws-sdk-qldb/lib/aws-sdk-qldb.rb +++ b/gems/aws-sdk-qldb/lib/aws-sdk-qldb.rb @@ -52,6 +52,6 @@ # @!group service module Aws::QLDB - GEM_VERSION = '1.37.0' + GEM_VERSION = '1.38.0' end diff --git a/gems/aws-sdk-qldb/lib/aws-sdk-qldb/client.rb b/gems/aws-sdk-qldb/lib/aws-sdk-qldb/client.rb index f8c3a619e1d..4c871c16145 100644 --- a/gems/aws-sdk-qldb/lib/aws-sdk-qldb/client.rb +++ b/gems/aws-sdk-qldb/lib/aws-sdk-qldb/client.rb @@ -1661,7 +1661,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-qldb' - context[:gem_version] = '1.37.0' + context[:gem_version] = '1.38.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-qldb/lib/aws-sdk-qldb/endpoint_provider.rb b/gems/aws-sdk-qldb/lib/aws-sdk-qldb/endpoint_provider.rb index 0544a601ee1..939c8efbc3d 100644 --- a/gems/aws-sdk-qldb/lib/aws-sdk-qldb/endpoint_provider.rb +++ b/gems/aws-sdk-qldb/lib/aws-sdk-qldb/endpoint_provider.rb @@ -32,7 +32,7 @@ def resolve_endpoint(parameters) raise ArgumentError, "FIPS and DualStack are enabled, but this partition does not support one or both" end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) - if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS")) + if Aws::Endpoints::Matchers.boolean_equals?(Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS"), true) return Aws::Endpoints::Endpoint.new(url: "https://qldb-fips.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {}) end raise ArgumentError, "FIPS is enabled but this 
partition does not support FIPS" diff --git a/gems/aws-sdk-qldb/lib/aws-sdk-qldb/types.rb b/gems/aws-sdk-qldb/lib/aws-sdk-qldb/types.rb index dfacfe2ae00..81bb46aa995 100644 --- a/gems/aws-sdk-qldb/lib/aws-sdk-qldb/types.rb +++ b/gems/aws-sdk-qldb/lib/aws-sdk-qldb/types.rb @@ -337,7 +337,9 @@ class DescribeLedgerRequest < Struct.new( # @!attribute [rw] encryption_description # Information about the encryption of data at rest in the ledger. This # includes the current status, the KMS key, and when the key became - # inaccessible (in the case of an error). + # inaccessible (in the case of an error). If this parameter is + # undefined, the ledger uses an Amazon Web Services owned KMS key for + # encryption. # @return [Types::LedgerEncryptionDescription] # # @see http://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/DescribeLedgerResponse AWS API Documentation @@ -805,7 +807,9 @@ class KinesisConfiguration < Struct.new( # The Amazon Resource Name (ARN) of the customer managed KMS key that # the ledger uses for encryption at rest. If this parameter is # undefined, the ledger uses an Amazon Web Services owned KMS key for - # encryption. + # encryption. It will display `AWS_OWNED_KMS_KEY` when updating the + # ledger's encryption configuration to the Amazon Web Services owned + # KMS key. 
# @return [String] # # @!attribute [rw] encryption_status diff --git a/gems/aws-sdk-rds/CHANGELOG.md b/gems/aws-sdk-rds/CHANGELOG.md index e63c7698520..4cc47d52933 100644 --- a/gems/aws-sdk-rds/CHANGELOG.md +++ b/gems/aws-sdk-rds/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.218.0 (2024-02-23) +------------------ + +* Feature - Add pattern and length based validations for DBShardGroupIdentifier + 1.217.0 (2024-02-16) ------------------ diff --git a/gems/aws-sdk-rds/VERSION b/gems/aws-sdk-rds/VERSION index b6f43969a79..aaf66f15301 100644 --- a/gems/aws-sdk-rds/VERSION +++ b/gems/aws-sdk-rds/VERSION @@ -1 +1 @@ -1.217.0 +1.218.0 diff --git a/gems/aws-sdk-rds/lib/aws-sdk-rds.rb b/gems/aws-sdk-rds/lib/aws-sdk-rds.rb index e4a675af423..16c22f23b28 100644 --- a/gems/aws-sdk-rds/lib/aws-sdk-rds.rb +++ b/gems/aws-sdk-rds/lib/aws-sdk-rds.rb @@ -78,6 +78,6 @@ # @!group service module Aws::RDS - GEM_VERSION = '1.217.0' + GEM_VERSION = '1.218.0' end diff --git a/gems/aws-sdk-rds/lib/aws-sdk-rds/client.rb b/gems/aws-sdk-rds/lib/aws-sdk-rds/client.rb index 8edcd83b5fd..32d36758724 100644 --- a/gems/aws-sdk-rds/lib/aws-sdk-rds/client.rb +++ b/gems/aws-sdk-rds/lib/aws-sdk-rds/client.rb @@ -9739,7 +9739,7 @@ def delete_db_security_group(params = {}, options = {}) # @example Request syntax with placeholder values # # resp = client.delete_db_shard_group({ - # db_shard_group_identifier: "String", # required + # db_shard_group_identifier: "DBShardGroupIdentifier", # required # }) # # @example Response structure @@ -13992,7 +13992,7 @@ def describe_db_security_groups(params = {}, options = {}) # @example Request syntax with placeholder values # # resp = client.describe_db_shard_groups({ - # db_shard_group_identifier: "String", + # db_shard_group_identifier: "DBShardGroupIdentifier", # filters: [ # { # name: "String", # required @@ -21347,7 +21347,7 @@ def modify_db_recommendation(params = {}, options = {}) # @example Request syntax with placeholder values # # 
resp = client.modify_db_shard_group({ - # db_shard_group_identifier: "String", # required + # db_shard_group_identifier: "DBShardGroupIdentifier", # required # max_acu: 1.0, # }) # @@ -23274,7 +23274,7 @@ def reboot_db_instance(params = {}, options = {}) # @example Request syntax with placeholder values # # resp = client.reboot_db_shard_group({ - # db_shard_group_identifier: "String", # required + # db_shard_group_identifier: "DBShardGroupIdentifier", # required # }) # # @example Response structure @@ -30414,7 +30414,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-rds' - context[:gem_version] = '1.217.0' + context[:gem_version] = '1.218.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-rds/lib/aws-sdk-rds/client_api.rb b/gems/aws-sdk-rds/lib/aws-sdk-rds/client_api.rb index c92336b54fb..759f2b34067 100644 --- a/gems/aws-sdk-rds/lib/aws-sdk-rds/client_api.rb +++ b/gems/aws-sdk-rds/lib/aws-sdk-rds/client_api.rb @@ -249,6 +249,7 @@ module ClientApi DBSecurityGroups = Shapes::ListShape.new(name: 'DBSecurityGroups') DBShardGroup = Shapes::StructureShape.new(name: 'DBShardGroup') DBShardGroupAlreadyExistsFault = Shapes::StructureShape.new(name: 'DBShardGroupAlreadyExistsFault') + DBShardGroupIdentifier = Shapes::StringShape.new(name: 'DBShardGroupIdentifier') DBShardGroupNotFoundFault = Shapes::StructureShape.new(name: 'DBShardGroupNotFoundFault') DBShardGroupsList = Shapes::ListShape.new(name: 'DBShardGroupsList') DBSnapshot = Shapes::StructureShape.new(name: 'DBSnapshot') @@ -1996,7 +1997,7 @@ module ClientApi DBSecurityGroups.member = Shapes::ShapeRef.new(shape: DBSecurityGroup, location_name: "DBSecurityGroup") DBShardGroup.add_member(:db_shard_group_resource_id, Shapes::ShapeRef.new(shape: String, location_name: "DBShardGroupResourceId")) - DBShardGroup.add_member(:db_shard_group_identifier, Shapes::ShapeRef.new(shape: String, location_name: "DBShardGroupIdentifier")) + 
DBShardGroup.add_member(:db_shard_group_identifier, Shapes::ShapeRef.new(shape: DBShardGroupIdentifier, location_name: "DBShardGroupIdentifier")) DBShardGroup.add_member(:db_cluster_identifier, Shapes::ShapeRef.new(shape: String, location_name: "DBClusterIdentifier")) DBShardGroup.add_member(:max_acu, Shapes::ShapeRef.new(shape: DoubleOptional, location_name: "MaxACU")) DBShardGroup.add_member(:compute_redundancy, Shapes::ShapeRef.new(shape: IntegerOptional, location_name: "ComputeRedundancy")) @@ -2193,7 +2194,7 @@ module ClientApi DeleteDBSecurityGroupMessage.add_member(:db_security_group_name, Shapes::ShapeRef.new(shape: String, required: true, location_name: "DBSecurityGroupName")) DeleteDBSecurityGroupMessage.struct_class = Types::DeleteDBSecurityGroupMessage - DeleteDBShardGroupMessage.add_member(:db_shard_group_identifier, Shapes::ShapeRef.new(shape: String, required: true, location_name: "DBShardGroupIdentifier")) + DeleteDBShardGroupMessage.add_member(:db_shard_group_identifier, Shapes::ShapeRef.new(shape: DBShardGroupIdentifier, required: true, location_name: "DBShardGroupIdentifier")) DeleteDBShardGroupMessage.struct_class = Types::DeleteDBShardGroupMessage DeleteDBSnapshotMessage.add_member(:db_snapshot_identifier, Shapes::ShapeRef.new(shape: String, required: true, location_name: "DBSnapshotIdentifier")) @@ -2432,7 +2433,7 @@ module ClientApi DescribeDBSecurityGroupsMessage.add_member(:marker, Shapes::ShapeRef.new(shape: String, location_name: "Marker")) DescribeDBSecurityGroupsMessage.struct_class = Types::DescribeDBSecurityGroupsMessage - DescribeDBShardGroupsMessage.add_member(:db_shard_group_identifier, Shapes::ShapeRef.new(shape: String, location_name: "DBShardGroupIdentifier")) + DescribeDBShardGroupsMessage.add_member(:db_shard_group_identifier, Shapes::ShapeRef.new(shape: DBShardGroupIdentifier, location_name: "DBShardGroupIdentifier")) DescribeDBShardGroupsMessage.add_member(:filters, Shapes::ShapeRef.new(shape: FilterList, location_name: 
"Filters")) DescribeDBShardGroupsMessage.add_member(:marker, Shapes::ShapeRef.new(shape: String, location_name: "Marker")) DescribeDBShardGroupsMessage.add_member(:max_records, Shapes::ShapeRef.new(shape: MaxRecords, location_name: "MaxRecords")) @@ -3187,7 +3188,7 @@ module ClientApi ModifyDBRecommendationMessage.add_member(:recommended_action_updates, Shapes::ShapeRef.new(shape: RecommendedActionUpdateList, location_name: "RecommendedActionUpdates")) ModifyDBRecommendationMessage.struct_class = Types::ModifyDBRecommendationMessage - ModifyDBShardGroupMessage.add_member(:db_shard_group_identifier, Shapes::ShapeRef.new(shape: String, required: true, location_name: "DBShardGroupIdentifier")) + ModifyDBShardGroupMessage.add_member(:db_shard_group_identifier, Shapes::ShapeRef.new(shape: DBShardGroupIdentifier, required: true, location_name: "DBShardGroupIdentifier")) ModifyDBShardGroupMessage.add_member(:max_acu, Shapes::ShapeRef.new(shape: DoubleOptional, location_name: "MaxACU")) ModifyDBShardGroupMessage.struct_class = Types::ModifyDBShardGroupMessage @@ -3561,7 +3562,7 @@ module ClientApi RebootDBInstanceResult.add_member(:db_instance, Shapes::ShapeRef.new(shape: DBInstance, location_name: "DBInstance")) RebootDBInstanceResult.struct_class = Types::RebootDBInstanceResult - RebootDBShardGroupMessage.add_member(:db_shard_group_identifier, Shapes::ShapeRef.new(shape: String, required: true, location_name: "DBShardGroupIdentifier")) + RebootDBShardGroupMessage.add_member(:db_shard_group_identifier, Shapes::ShapeRef.new(shape: DBShardGroupIdentifier, required: true, location_name: "DBShardGroupIdentifier")) RebootDBShardGroupMessage.struct_class = Types::RebootDBShardGroupMessage RecommendedAction.add_member(:action_id, Shapes::ShapeRef.new(shape: String, location_name: "ActionId")) From 9cf7ffb8ebab8a9db01b92668cee43c6b961a077 Mon Sep 17 00:00:00 2001 From: AWS SDK For Ruby Date: Mon, 26 Feb 2024 19:17:09 +0000 Subject: [PATCH 6/8] Updated API models and rebuilt 
service gems. --- apis/apigateway/2015-07-09/docs-2.json | 6 +- .../2015-07-09/endpoint-rule-set-1.json | 40 ++-- apis/drs/2020-02-26/api-2.json | 42 +++- apis/drs/2020-02-26/docs-2.json | 37 +++ apis/kafkaconnect/2021-09-14/api-2.json | 215 +++++++++++++++++- apis/kafkaconnect/2021-09-14/docs-2.json | 97 +++++++- .../2021-09-14/endpoint-rule-set-1.json | 40 ++-- apis/rds/2014-10-31/api-2.json | 9 +- apis/rds/2014-10-31/docs-2.json | 5 +- gems/aws-sdk-apigateway/CHANGELOG.md | 5 + gems/aws-sdk-apigateway/VERSION | 2 +- .../lib/aws-sdk-apigateway.rb | 2 +- .../lib/aws-sdk-apigateway/client.rb | 7 +- .../aws-sdk-apigateway/endpoint_provider.rb | 2 +- .../lib/aws-sdk-apigateway/types.rb | 7 +- gems/aws-sdk-drs/CHANGELOG.md | 5 + gems/aws-sdk-drs/VERSION | 2 +- gems/aws-sdk-drs/lib/aws-sdk-drs.rb | 2 +- gems/aws-sdk-drs/lib/aws-sdk-drs/client.rb | 12 +- .../aws-sdk-drs/lib/aws-sdk-drs/client_api.rb | 17 ++ gems/aws-sdk-drs/lib/aws-sdk-drs/types.rb | 32 ++- gems/aws-sdk-drs/sig/types.rbs | 8 + gems/aws-sdk-kafkaconnect/CHANGELOG.md | 5 + gems/aws-sdk-kafkaconnect/VERSION | 2 +- .../lib/aws-sdk-kafkaconnect.rb | 2 +- .../lib/aws-sdk-kafkaconnect/client.rb | 159 ++++++++++++- .../lib/aws-sdk-kafkaconnect/client_api.rb | 112 +++++++++ .../aws-sdk-kafkaconnect/endpoint_provider.rb | 2 +- .../lib/aws-sdk-kafkaconnect/endpoints.rb | 56 +++++ .../aws-sdk-kafkaconnect/plugins/endpoints.rb | 8 + .../lib/aws-sdk-kafkaconnect/types.rb | 168 +++++++++++++- gems/aws-sdk-kafkaconnect/sig/client.rbs | 52 ++++- gems/aws-sdk-kafkaconnect/sig/types.rbs | 47 ++++ gems/aws-sdk-rds/CHANGELOG.md | 5 + gems/aws-sdk-rds/VERSION | 2 +- gems/aws-sdk-rds/lib/aws-sdk-rds.rb | 2 +- gems/aws-sdk-rds/lib/aws-sdk-rds/client.rb | 26 ++- .../aws-sdk-rds/lib/aws-sdk-rds/client_api.rb | 3 + .../aws-sdk-rds/lib/aws-sdk-rds/db_cluster.rb | 10 + .../lib/aws-sdk-rds/db_cluster_snapshot.rb | 10 + .../lib/aws-sdk-rds/db_snapshot.rb | 6 +- gems/aws-sdk-rds/lib/aws-sdk-rds/types.rb | 39 +++- 
gems/aws-sdk-rds/sig/db_cluster.rbs | 3 + gems/aws-sdk-rds/sig/db_cluster_snapshot.rbs | 3 + gems/aws-sdk-rds/sig/types.rbs | 3 + 45 files changed, 1212 insertions(+), 107 deletions(-) diff --git a/apis/apigateway/2015-07-09/docs-2.json b/apis/apigateway/2015-07-09/docs-2.json index c11169fc467..b9b87327e54 100644 --- a/apis/apigateway/2015-07-09/docs-2.json +++ b/apis/apigateway/2015-07-09/docs-2.json @@ -114,7 +114,7 @@ "UpdateIntegrationResponse": "

Represents an update integration response.

", "UpdateMethod": "

Updates an existing Method resource.

", "UpdateMethodResponse": "

Updates an existing MethodResponse resource.

", - "UpdateModel": "

Changes information about a model.

", + "UpdateModel": "

Changes information about a model. The maximum size of the model is 400 KB.

", "UpdateRequestValidator": "

Updates a RequestValidator of a given RestApi.

", "UpdateResource": "

Changes information about a Resource resource.

", "UpdateRestApi": "

Changes information about the specified API.

", @@ -245,7 +245,7 @@ "RequestValidator$validateRequestParameters": "

A Boolean flag to indicate whether to validate request parameters (true) or not (false).

", "RestApi$disableExecuteApiEndpoint": "

Specifies whether clients can invoke your API by using the default execute-api endpoint. By default, clients can invoke your API with the default https://{api_id}.execute-api.{region}.amazonaws.com endpoint. To require that clients use a custom domain name to invoke your API, disable the default endpoint.

", "SdkConfigurationProperty$required": "

A boolean flag of an SdkType configuration property to indicate if the associated SDK configuration property is required (true) or not (false).

", - "Stage$cacheClusterEnabled": "

Specifies whether a cache cluster is enabled for the stage.

", + "Stage$cacheClusterEnabled": "

Specifies whether a cache cluster is enabled for the stage. To activate a method-level cache, set CachingEnabled to true for a method.

", "Stage$tracingEnabled": "

Specifies whether active tracing with X-ray is enabled for the Stage.

", "TlsConfig$insecureSkipVerification": "

Specifies whether or not API Gateway skips verification that the certificate for an integration endpoint is issued by a supported certificate authority. This isn’t recommended, but it enables you to use certificates that are signed by private certificate authorities, or certificates that are self-signed. If enabled, API Gateway still performs basic certificate validation, which includes checking the certificate's expiration date, hostname, and presence of a root certificate authority. Supported only for HTTP and HTTP_PROXY integrations.

Enabling insecureSkipVerification isn't recommended, especially for integrations with public HTTPS endpoints. If you enable insecureSkipVerification, you increase the risk of man-in-the-middle attacks.

" } @@ -1583,7 +1583,7 @@ "CreateModelRequest$restApiId": "

The RestApi identifier under which the Model will be created.

", "CreateModelRequest$name": "

The name of the model. Must be alphanumeric.

", "CreateModelRequest$description": "

The description of the model.

", - "CreateModelRequest$schema": "

The schema for the model. For application/json models, this should be JSON schema draft 4 model.

", + "CreateModelRequest$schema": "

The schema for the model. For application/json models, this should be JSON schema draft 4 model. The maximum size of the model is 400 KB.

", "CreateModelRequest$contentType": "

The content-type for the model.

", "CreateRequestValidatorRequest$restApiId": "

The string identifier of the associated RestApi.

", "CreateRequestValidatorRequest$name": "

The name of the to-be-created RequestValidator.

", diff --git a/apis/apigateway/2015-07-09/endpoint-rule-set-1.json b/apis/apigateway/2015-07-09/endpoint-rule-set-1.json index 61804152356..c8acc7bd13a 100644 --- a/apis/apigateway/2015-07-09/endpoint-rule-set-1.json +++ b/apis/apigateway/2015-07-09/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { 
"conditions": [], diff --git a/apis/drs/2020-02-26/api-2.json b/apis/drs/2020-02-26/api-2.json index f82267b2347..a6bd6bc41fc 100644 --- a/apis/drs/2020-02-26/api-2.json +++ b/apis/drs/2020-02-26/api-2.json @@ -1006,6 +1006,7 @@ "forceUefi":{"shape":"Boolean"}, "rootVolumeName":{"shape":"LargeBoundedString"}, "volumeToConversionMap":{"shape":"VolumeToConversionMap"}, + "volumeToProductCodes":{"shape":"VolumeToProductCodes"}, "volumeToVolumeSize":{"shape":"VolumeToSizeMap"} } }, @@ -1148,7 +1149,8 @@ "deviceName":{"shape":"BoundedString"}, "replicatedStorageBytes":{"shape":"PositiveInteger"}, "rescannedStorageBytes":{"shape":"PositiveInteger"}, - "totalStorageBytes":{"shape":"PositiveInteger"} + "totalStorageBytes":{"shape":"PositiveInteger"}, + "volumeStatus":{"shape":"VolumeStatus"} } }, "DataReplicationInfoReplicatedDisks":{ @@ -2280,6 +2282,30 @@ "type":"long", "min":0 }, + "ProductCode":{ + "type":"structure", + "members":{ + "productCodeId":{"shape":"ProductCodeId"}, + "productCodeMode":{"shape":"ProductCodeMode"} + } + }, + "ProductCodeId":{ + "type":"string", + "max":25, + "min":25, + "pattern":"^([A-Za-z0-9])+$" + }, + "ProductCodeMode":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "ProductCodes":{ + "type":"list", + "member":{"shape":"ProductCode"} + }, "PutLaunchActionRequest":{ "type":"structure", "required":[ @@ -3271,11 +3297,25 @@ "other" ] }, + "VolumeStatus":{ + "type":"string", + "enum":[ + "REGULAR", + "CONTAINS_MARKETPLACE_PRODUCT_CODES", + "MISSING_VOLUME_ATTRIBUTES", + "MISSING_VOLUME_ATTRIBUTES_AND_PRECHECK_UNAVAILABLE" + ] + }, "VolumeToConversionMap":{ "type":"map", "key":{"shape":"LargeBoundedString"}, "value":{"shape":"ConversionMap"} }, + "VolumeToProductCodes":{ + "type":"map", + "key":{"shape":"LargeBoundedString"}, + "value":{"shape":"ProductCodes"} + }, "VolumeToSizeMap":{ "type":"map", "key":{"shape":"LargeBoundedString"}, diff --git a/apis/drs/2020-02-26/docs-2.json b/apis/drs/2020-02-26/docs-2.json index 
9fa9577e088..db3db46a702 100644 --- a/apis/drs/2020-02-26/docs-2.json +++ b/apis/drs/2020-02-26/docs-2.json @@ -885,6 +885,7 @@ "ValidationExceptionField$message": "

Validate exception field message.

", "ValidationExceptionField$name": "

Validate exception field name.

", "VolumeToConversionMap$key": null, + "VolumeToProductCodes$key": null, "VolumeToSizeMap$key": null } }, @@ -1334,6 +1335,30 @@ "VolumeToSizeMap$value": null } }, + "ProductCode": { + "base": "

Properties of a product code associated with a volume.

", + "refs": { + "ProductCodes$member": null + } + }, + "ProductCodeId": { + "base": null, + "refs": { + "ProductCode$productCodeId": "

Id of a product code associated with a volume.

" + } + }, + "ProductCodeMode": { + "base": null, + "refs": { + "ProductCode$productCodeMode": "

Mode of a product code associated with a volume.

" + } + }, + "ProductCodes": { + "base": null, + "refs": { + "VolumeToProductCodes$value": null + } + }, "PutLaunchActionRequest": { "base": null, "refs": { @@ -2054,12 +2079,24 @@ "ValidationException$reason": "

Validation exception reason.

" } }, + "VolumeStatus": { + "base": null, + "refs": { + "DataReplicationInfoReplicatedDisk$volumeStatus": "

The status of the volume.

" + } + }, "VolumeToConversionMap": { "base": null, "refs": { "ConversionProperties$volumeToConversionMap": "

A mapping between the volumes being converted and the converted snapshot ids

" } }, + "VolumeToProductCodes": { + "base": null, + "refs": { + "ConversionProperties$volumeToProductCodes": "

A mapping between the volumes being converted and the product codes associated with them

" + } + }, "VolumeToSizeMap": { "base": null, "refs": { diff --git a/apis/kafkaconnect/2021-09-14/api-2.json b/apis/kafkaconnect/2021-09-14/api-2.json index 8632fb1dd48..62234e4f0c4 100644 --- a/apis/kafkaconnect/2021-09-14/api-2.json +++ b/apis/kafkaconnect/2021-09-14/api-2.json @@ -113,6 +113,26 @@ ], "idempotent":true }, + "DeleteWorkerConfiguration":{ + "name":"DeleteWorkerConfiguration", + "http":{ + "method":"DELETE", + "requestUri":"/v1/worker-configurations/{workerConfigurationArn}", + "responseCode":200 + }, + "input":{"shape":"DeleteWorkerConfigurationRequest"}, + "output":{"shape":"DeleteWorkerConfigurationResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerErrorException"} + ], + "idempotent":true + }, "DescribeConnector":{ "name":"DescribeConnector", "http":{ @@ -208,6 +228,25 @@ {"shape":"InternalServerErrorException"} ] }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/v1/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerErrorException"} + ] + }, "ListWorkerConfigurations":{ "name":"ListWorkerConfigurations", "http":{ @@ -227,6 +266,47 @@ {"shape":"InternalServerErrorException"} ] }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/v1/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + 
{"shape":"NotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerErrorException"} + ], + "idempotent":true + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/v1/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerErrorException"} + ], + "idempotent":true + }, "UpdateConnector":{ "name":"UpdateConnector", "http":{ @@ -422,6 +502,7 @@ "logDelivery":{"shape":"LogDelivery"}, "plugins":{"shape":"__listOfPlugin"}, "serviceExecutionRoleArn":{"shape":"__string"}, + "tags":{"shape":"Tags"}, "workerConfiguration":{"shape":"WorkerConfiguration"} } }, @@ -444,7 +525,8 @@ "contentType":{"shape":"CustomPluginContentType"}, "description":{"shape":"__stringMax1024"}, "location":{"shape":"CustomPluginLocation"}, - "name":{"shape":"__stringMin1Max128"} + "name":{"shape":"__stringMin1Max128"}, + "tags":{"shape":"Tags"} } }, "CreateCustomPluginResponse":{ @@ -465,7 +547,8 @@ "members":{ "description":{"shape":"__stringMax1024"}, "name":{"shape":"__stringMin1Max128"}, - "propertiesFileContent":{"shape":"__sensitiveString"} + "propertiesFileContent":{"shape":"__sensitiveString"}, + "tags":{"shape":"Tags"} } }, "CreateWorkerConfigurationResponse":{ @@ -474,7 +557,8 @@ "creationTime":{"shape":"__timestampIso8601"}, "latestRevision":{"shape":"WorkerConfigurationRevisionSummary"}, "name":{"shape":"__string"}, - "workerConfigurationArn":{"shape":"__string"} + 
"workerConfigurationArn":{"shape":"__string"}, + "workerConfigurationState":{"shape":"WorkerConfigurationState"} } }, "CustomPlugin":{ @@ -596,6 +680,24 @@ "customPluginState":{"shape":"CustomPluginState"} } }, + "DeleteWorkerConfigurationRequest":{ + "type":"structure", + "required":["workerConfigurationArn"], + "members":{ + "workerConfigurationArn":{ + "shape":"__string", + "location":"uri", + "locationName":"workerConfigurationArn" + } + } + }, + "DeleteWorkerConfigurationResponse":{ + "type":"structure", + "members":{ + "workerConfigurationArn":{"shape":"__string"}, + "workerConfigurationState":{"shape":"WorkerConfigurationState"} + } + }, "DescribeConnectorRequest":{ "type":"structure", "required":["connectorArn"], @@ -670,7 +772,8 @@ "description":{"shape":"__string"}, "latestRevision":{"shape":"WorkerConfigurationRevisionDescription"}, "name":{"shape":"__string"}, - "workerConfigurationArn":{"shape":"__string"} + "workerConfigurationArn":{"shape":"__string"}, + "workerConfigurationState":{"shape":"WorkerConfigurationState"} } }, "FirehoseLogDelivery":{ @@ -796,6 +899,11 @@ "location":"querystring", "locationName":"maxResults" }, + "namePrefix":{ + "shape":"__string", + "location":"querystring", + "locationName":"namePrefix" + }, "nextToken":{ "shape":"__string", "location":"querystring", @@ -810,6 +918,23 @@ "nextToken":{"shape":"__string"} } }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"__string", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{"shape":"Tags"} + } + }, "ListWorkerConfigurationsRequest":{ "type":"structure", "members":{ @@ -818,6 +943,11 @@ "location":"querystring", "locationName":"maxResults" }, + "namePrefix":{ + "shape":"__string", + "location":"querystring", + "locationName":"namePrefix" + }, "nextToken":{ "shape":"__string", "location":"querystring", @@ 
-996,6 +1126,49 @@ "message":{"shape":"__string"} } }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"__string", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{"shape":"Tags"} + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "Tags":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":200, + "min":0 + }, "TooManyRequestsException":{ "type":"structure", "members":{ @@ -1018,6 +1191,30 @@ }, "exception":true }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"__string", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateConnectorRequest":{ "type":"structure", "required":[ @@ -1096,6 +1293,13 @@ "revision":{"shape":"__long"} } }, + "WorkerConfigurationState":{ + "type":"string", + "enum":[ + "ACTIVE", + "DELETING" + ] + }, "WorkerConfigurationSummary":{ "type":"structure", "members":{ @@ -1103,7 +1307,8 @@ "description":{"shape":"__string"}, "latestRevision":{"shape":"WorkerConfigurationRevisionSummary"}, "name":{"shape":"__string"}, - "workerConfigurationArn":{"shape":"__string"} + "workerConfigurationArn":{"shape":"__string"}, + "workerConfigurationState":{"shape":"WorkerConfigurationState"} } }, "WorkerLogDelivery":{ diff --git a/apis/kafkaconnect/2021-09-14/docs-2.json b/apis/kafkaconnect/2021-09-14/docs-2.json index ba345141973..65668e761f1 100644 --- a/apis/kafkaconnect/2021-09-14/docs-2.json +++ 
b/apis/kafkaconnect/2021-09-14/docs-2.json @@ -2,17 +2,21 @@ "version": "2.0", "service": "

", "operations": { - "CreateConnector": "

Creates a connector using the specified properties.

", + "CreateConnector": "

Creates a connector using the specified properties.

", "CreateCustomPlugin": "

Creates a custom plugin using the specified properties.

", "CreateWorkerConfiguration": "

Creates a worker configuration using the specified properties.

", "DeleteConnector": "

Deletes the specified connector.

", "DeleteCustomPlugin": "

Deletes a custom plugin.

", + "DeleteWorkerConfiguration": "

Deletes the specified worker configuration.

", "DescribeConnector": "

Returns summary information about the connector.

", "DescribeCustomPlugin": "

A summary description of the custom plugin.

", "DescribeWorkerConfiguration": "

Returns information about a worker configuration.

", "ListConnectors": "

Returns a list of all the connectors in this account and Region. The list is limited to connectors whose name starts with the specified prefix. The response also includes a description of each of the listed connectors.

", "ListCustomPlugins": "

Returns a list of all of the custom plugins in this account and Region.

", + "ListTagsForResource": "

Lists all the tags attached to the specified resource.

", "ListWorkerConfigurations": "

Returns a list of all of the worker configurations in this account and Region.

", + "TagResource": "

Attaches tags to the specified resource.

", + "UntagResource": "

Removes tags from the specified resource.

", "UpdateConnector": "

Updates the specified connector.

" }, "shapes": { @@ -134,7 +138,7 @@ } }, "CustomPlugin": { - "base": "

A plugin is an AWS resource that contains the code that defines a connector's logic.

", + "base": "

A plugin is an Amazon Web Services resource that contains the code that defines a connector's logic.

", "refs": { "Plugin$customPlugin": "

Details about a custom plugin.

" } @@ -212,6 +216,16 @@ "refs": { } }, + "DeleteWorkerConfigurationRequest": { + "base": null, + "refs": { + } + }, + "DeleteWorkerConfigurationResponse": { + "base": null, + "refs": { + } + }, "DescribeConnectorRequest": { "base": null, "refs": { @@ -337,6 +351,16 @@ "refs": { } }, + "ListTagsForResourceRequest": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceResponse": { + "base": null, + "refs": { + } + }, "ListWorkerConfigurationsRequest": { "base": null, "refs": { @@ -374,7 +398,7 @@ } }, "Plugin": { - "base": "

A plugin is an AWS resource that contains the code that defines your connector logic.

", + "base": "

A plugin is an Amazon Web Services resource that contains the code that defines your connector logic.

", "refs": { "__listOfPlugin$member": null } @@ -475,6 +499,45 @@ "DescribeCustomPluginResponse$stateDescription": "

Details about the state of a custom plugin.

" } }, + "TagKey": { + "base": null, + "refs": { + "TagKeyList$member": null, + "Tags$key": null + } + }, + "TagKeyList": { + "base": null, + "refs": { + "UntagResourceRequest$tagKeys": "

The keys of the tags that you want to remove from the resource.

" + } + }, + "TagResourceRequest": { + "base": null, + "refs": { + } + }, + "TagResourceResponse": { + "base": null, + "refs": { + } + }, + "TagValue": { + "base": null, + "refs": { + "Tags$value": null + } + }, + "Tags": { + "base": null, + "refs": { + "CreateConnectorRequest$tags": "

The tags you want to attach to the connector.

", + "CreateCustomPluginRequest$tags": "

The tags you want to attach to the custom plugin.

", + "CreateWorkerConfigurationRequest$tags": "

The tags you want to attach to the worker configuration.

", + "ListTagsForResourceResponse$tags": "

Lists the tags attached to the specified resource in the corresponding request.

", + "TagResourceRequest$tags": "

The tags that you want to attach to the resource.

" + } + }, "TooManyRequestsException": { "base": "

HTTP Status Code 429: Limit exceeded. Resource limit reached.

", "refs": { @@ -485,6 +548,16 @@ "refs": { } }, + "UntagResourceRequest": { + "base": null, + "refs": { + } + }, + "UntagResourceResponse": { + "base": null, + "refs": { + } + }, "UpdateConnectorRequest": { "base": null, "refs": { @@ -533,6 +606,15 @@ "WorkerConfigurationSummary$latestRevision": "

The latest revision of a worker configuration.

" } }, + "WorkerConfigurationState": { + "base": null, + "refs": { + "CreateWorkerConfigurationResponse$workerConfigurationState": "

The state of the worker configuration.

", + "DeleteWorkerConfigurationResponse$workerConfigurationState": "

The state of the worker configuration.

", + "DescribeWorkerConfigurationResponse$workerConfigurationState": "

The state of the worker configuration.

", + "WorkerConfigurationSummary$workerConfigurationState": "

The state of the worker configuration.

" + } + }, "WorkerConfigurationSummary": { "base": "

The summary of a worker configuration.

", "refs": { @@ -618,7 +700,7 @@ "__listOfPlugin": { "base": null, "refs": { - "CreateConnectorRequest$plugins": "

Specifies which plugins to use for the connector.

" + "CreateConnectorRequest$plugins": "

Amazon MSK Connect does not currently support specifying multiple plugins as a list. To use more than one plugin for your connector, you can create a single custom plugin using a ZIP file that bundles multiple plugins together.

Specifies which plugin to use for the connector. You must specify a single-element list containing one customPlugin object.

" } }, "__listOfPluginDescription": { @@ -711,6 +793,8 @@ "DeleteConnectorResponse$connectorArn": "

The Amazon Resource Name (ARN) of the connector that you requested to delete.

", "DeleteCustomPluginRequest$customPluginArn": "

The Amazon Resource Name (ARN) of the custom plugin that you want to delete.

", "DeleteCustomPluginResponse$customPluginArn": "

The Amazon Resource Name (ARN) of the custom plugin that you requested to delete.

", + "DeleteWorkerConfigurationRequest$workerConfigurationArn": "

The Amazon Resource Name (ARN) of the worker configuration that you want to delete.

", + "DeleteWorkerConfigurationResponse$workerConfigurationArn": "

The Amazon Resource Name (ARN) of the worker configuration that you requested to delete.

", "DescribeConnectorRequest$connectorArn": "

The Amazon Resource Name (ARN) of the connector that you want to describe.

", "DescribeConnectorResponse$connectorArn": "

The Amazon Resource Name (ARN) of the connector.

", "DescribeConnectorResponse$connectorDescription": "

A summary description of the connector.

", @@ -733,8 +817,11 @@ "ListConnectorsRequest$connectorNamePrefix": "

The name prefix that you want to use to search for and list connectors.

", "ListConnectorsRequest$nextToken": "

If the response of a ListConnectors operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.

", "ListConnectorsResponse$nextToken": "

If the response of a ListConnectors operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where it left off.

", + "ListCustomPluginsRequest$namePrefix": "

Lists custom plugin names that start with the specified text string.

", "ListCustomPluginsRequest$nextToken": "

If the response of a ListCustomPlugins operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.

", "ListCustomPluginsResponse$nextToken": "

If the response of a ListCustomPlugins operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.

", + "ListTagsForResourceRequest$resourceArn": "

The Amazon Resource Name (ARN) of the resource for which you want to list all attached tags.

", + "ListWorkerConfigurationsRequest$namePrefix": "

Lists worker configuration names that start with the specified text string.

", "ListWorkerConfigurationsRequest$nextToken": "

If the response of a ListWorkerConfigurations operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.

", "ListWorkerConfigurationsResponse$nextToken": "

If the response of a ListWorkerConfigurations operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.

", "NotFoundException$message": null, @@ -751,8 +838,10 @@ "ServiceUnavailableException$message": null, "StateDescription$code": "

A code that describes the state of a resource.

", "StateDescription$message": "

A message that describes the state of a resource.

", + "TagResourceRequest$resourceArn": "

The Amazon Resource Name (ARN) of the resource to which you want to attach tags.

", "TooManyRequestsException$message": null, "UnauthorizedException$message": null, + "UntagResourceRequest$resourceArn": "

The Amazon Resource Name (ARN) of the resource from which you want to remove tags.

", "UpdateConnectorRequest$connectorArn": "

The Amazon Resource Name (ARN) of the connector that you want to update.

", "UpdateConnectorRequest$currentVersion": "

The current version of the connector that you want to update.

", "UpdateConnectorResponse$connectorArn": "

The Amazon Resource Name (ARN) of the connector.

", diff --git a/apis/kafkaconnect/2021-09-14/endpoint-rule-set-1.json b/apis/kafkaconnect/2021-09-14/endpoint-rule-set-1.json index c5b90d3def0..545020b141c 100644 --- a/apis/kafkaconnect/2021-09-14/endpoint-rule-set-1.json +++ b/apis/kafkaconnect/2021-09-14/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" 
}, { "conditions": [], diff --git a/apis/rds/2014-10-31/api-2.json b/apis/rds/2014-10-31/api-2.json index 3755b4ad451..f28d4dc8881 100644 --- a/apis/rds/2014-10-31/api-2.json +++ b/apis/rds/2014-10-31/api-2.json @@ -4147,7 +4147,8 @@ "IOOptimizedNextAllowedModificationTime":{"shape":"TStamp"}, "LocalWriteForwardingStatus":{"shape":"LocalWriteForwardingStatus"}, "AwsBackupRecoveryPointArn":{"shape":"String"}, - "LimitlessDatabase":{"shape":"LimitlessDatabase"} + "LimitlessDatabase":{"shape":"LimitlessDatabase"}, + "StorageThroughput":{"shape":"IntegerOptional"} }, "wrapper":true }, @@ -4188,7 +4189,8 @@ "KmsKeyId":{"shape":"String"}, "StorageType":{"shape":"String"}, "Iops":{"shape":"IntegerOptional"}, - "AwsBackupRecoveryPointArn":{"shape":"String"} + "AwsBackupRecoveryPointArn":{"shape":"String"}, + "StorageThroughput":{"shape":"IntegerOptional"} }, "wrapper":true }, @@ -4532,7 +4534,8 @@ "TagList":{"shape":"TagList"}, "DBSystemId":{"shape":"String"}, "StorageType":{"shape":"String"}, - "DbClusterResourceId":{"shape":"String"} + "DbClusterResourceId":{"shape":"String"}, + "StorageThroughput":{"shape":"IntegerOptional"} }, "wrapper":true }, diff --git a/apis/rds/2014-10-31/docs-2.json b/apis/rds/2014-10-31/docs-2.json index e0570d41b75..a9e7b65dcb6 100644 --- a/apis/rds/2014-10-31/docs-2.json +++ b/apis/rds/2014-10-31/docs-2.json @@ -3012,12 +3012,15 @@ "DBCluster$Iops": "

The Provisioned IOPS (I/O operations per second) value.

This setting is only for non-Aurora Multi-AZ DB clusters.

", "DBCluster$MonitoringInterval": "

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster.

This setting is only for non-Aurora Multi-AZ DB clusters.

", "DBCluster$PerformanceInsightsRetentionPeriod": "

The number of days to retain Performance Insights data.

This setting is only for non-Aurora Multi-AZ DB clusters.

Valid Values:

  • 7

  • month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31)

  • 731

Default: 7 days

", + "DBCluster$StorageThroughput": "

The storage throughput for the DB cluster. The throughput is automatically set based on the IOPS that you provision, and is not configurable.

This setting is only for non-Aurora Multi-AZ DB clusters.

", "DBClusterAutomatedBackup$BackupRetentionPeriod": "

The retention period for the automated backups.

", "DBClusterAutomatedBackup$Iops": "

The IOPS (I/O operations per second) value for the automated backup.

This setting is only for non-Aurora Multi-AZ DB clusters.

", + "DBClusterAutomatedBackup$StorageThroughput": "

The storage throughput for the automated backup. The throughput is automatically set based on the IOPS that you provision, and is not configurable.

This setting is only for non-Aurora Multi-AZ DB clusters.

", "DBClusterCapacityInfo$PendingCapacity": "

A value that specifies the capacity that the DB cluster scales to next.

", "DBClusterCapacityInfo$CurrentCapacity": "

The current capacity of the DB cluster.

", "DBClusterCapacityInfo$SecondsBeforeTimeout": "

The number of seconds before a call to ModifyCurrentDBClusterCapacity times out.

", "DBClusterMember$PromotionTier": "

A value that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide.

", + "DBClusterSnapshot$StorageThroughput": "

The storage throughput for the DB cluster snapshot. The throughput is automatically set based on the IOPS that you provision, and is not configurable.

This setting is only for non-Aurora Multi-AZ DB clusters.

", "DBInstance$Iops": "

The Provisioned IOPS (I/O operations per second) value for the DB instance.

", "DBInstance$MonitoringInterval": "

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance.

", "DBInstance$PromotionTier": "

The order of priority in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide.

", @@ -5553,7 +5556,7 @@ "RestoreDBInstanceFromDBSnapshotMessage$CustomIamInstanceProfile": "

The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. The instance profile must meet the following requirements:

  • The profile must exist in your account.

  • The profile must have an IAM role that Amazon EC2 has permissions to assume.

  • The instance profile name and the associated IAM role name must start with the prefix AWSRDSCustom.

For the list of permissions required for the IAM role, see Configure IAM and your VPC in the Amazon RDS User Guide.

This setting is required for RDS Custom.

", "RestoreDBInstanceFromDBSnapshotMessage$BackupTarget": "

Specifies where automated backups and manual snapshots are stored for the restored DB instance.

Possible values are outposts (Amazon Web Services Outposts) and region (Amazon Web Services Region). The default is region.

For more information, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide.

", "RestoreDBInstanceFromDBSnapshotMessage$NetworkType": "

The network type of the DB instance.

Valid Values:

  • IPV4

  • DUAL

The network type is determined by the DBSubnetGroup specified for the DB instance. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL).

For more information, see Working with a DB instance in a VPC in the Amazon RDS User Guide.

", - "RestoreDBInstanceFromDBSnapshotMessage$DBClusterSnapshotIdentifier": "

The identifier for the RDS for MySQL Multi-AZ DB cluster snapshot to restore from.

For more information on Multi-AZ DB clusters, see Multi-AZ DB cluster deployments in the Amazon RDS User Guide.

Constraints:

  • Must match the identifier of an existing Multi-AZ DB cluster snapshot.

  • Can't be specified when DBSnapshotIdentifier is specified.

  • Must be specified when DBSnapshotIdentifier isn't specified.

  • If you are restoring from a shared manual Multi-AZ DB cluster snapshot, the DBClusterSnapshotIdentifier must be the ARN of the shared snapshot.

  • Can't be the identifier of an Aurora DB cluster snapshot.

  • Can't be the identifier of an RDS for PostgreSQL Multi-AZ DB cluster snapshot.

", + "RestoreDBInstanceFromDBSnapshotMessage$DBClusterSnapshotIdentifier": "

The identifier for the Multi-AZ DB cluster snapshot to restore from.

For more information on Multi-AZ DB clusters, see Multi-AZ DB cluster deployments in the Amazon RDS User Guide.

Constraints:

  • Must match the identifier of an existing Multi-AZ DB cluster snapshot.

  • Can't be specified when DBSnapshotIdentifier is specified.

  • Must be specified when DBSnapshotIdentifier isn't specified.

  • If you are restoring from a shared manual Multi-AZ DB cluster snapshot, the DBClusterSnapshotIdentifier must be the ARN of the shared snapshot.

  • Can't be the identifier of an Aurora DB cluster snapshot.

", "RestoreDBInstanceFromS3Message$DBName": "

The name of the database to create when the DB instance is created. Follow the naming rules specified in CreateDBInstance.

", "RestoreDBInstanceFromS3Message$DBInstanceIdentifier": "

The DB instance identifier. This parameter is stored as a lowercase string.

Constraints:

  • Must contain from 1 to 63 letters, numbers, or hyphens.

  • First character must be a letter.

  • Can't end with a hyphen or contain two consecutive hyphens.

Example: mydbinstance

", "RestoreDBInstanceFromS3Message$DBInstanceClass": "

The compute and memory capacity of the DB instance, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.

Importing from Amazon S3 isn't supported on the db.t2.micro DB instance class.

", diff --git a/gems/aws-sdk-apigateway/CHANGELOG.md b/gems/aws-sdk-apigateway/CHANGELOG.md index 55b39532819..929f351c4e5 100644 --- a/gems/aws-sdk-apigateway/CHANGELOG.md +++ b/gems/aws-sdk-apigateway/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.92.0 (2024-02-26) +------------------ + +* Feature - Documentation updates for Amazon API Gateway. + 1.91.0 (2024-01-26) ------------------ diff --git a/gems/aws-sdk-apigateway/VERSION b/gems/aws-sdk-apigateway/VERSION index 6979a6c0661..7f229af9647 100644 --- a/gems/aws-sdk-apigateway/VERSION +++ b/gems/aws-sdk-apigateway/VERSION @@ -1 +1 @@ -1.91.0 +1.92.0 diff --git a/gems/aws-sdk-apigateway/lib/aws-sdk-apigateway.rb b/gems/aws-sdk-apigateway/lib/aws-sdk-apigateway.rb index 2f27760882e..71c470e7e24 100644 --- a/gems/aws-sdk-apigateway/lib/aws-sdk-apigateway.rb +++ b/gems/aws-sdk-apigateway/lib/aws-sdk-apigateway.rb @@ -52,6 +52,6 @@ # @!group service module Aws::APIGateway - GEM_VERSION = '1.91.0' + GEM_VERSION = '1.92.0' end diff --git a/gems/aws-sdk-apigateway/lib/aws-sdk-apigateway/client.rb b/gems/aws-sdk-apigateway/lib/aws-sdk-apigateway/client.rb index 94f9bc5e906..59e01cfb548 100644 --- a/gems/aws-sdk-apigateway/lib/aws-sdk-apigateway/client.rb +++ b/gems/aws-sdk-apigateway/lib/aws-sdk-apigateway/client.rb @@ -992,7 +992,7 @@ def create_domain_name(params = {}, options = {}) # # @option params [String] :schema # The schema for the model. For `application/json` models, this should - # be JSON schema draft 4 model. + # be JSON schema draft 4 model. The maximum size of the model is 400 KB. # # @option params [required, String] :content_type # The content-type for the model. @@ -6494,7 +6494,8 @@ def update_method_response(params = {}, options = {}) req.send_request(options) end - # Changes information about a model. + # Changes information about a model. The maximum size of the model is + # 400 KB. 
# # @option params [required, String] :rest_api_id # The string identifier of the associated RestApi. @@ -7060,7 +7061,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-apigateway' - context[:gem_version] = '1.91.0' + context[:gem_version] = '1.92.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-apigateway/lib/aws-sdk-apigateway/endpoint_provider.rb b/gems/aws-sdk-apigateway/lib/aws-sdk-apigateway/endpoint_provider.rb index 42bd2e1eb6c..7a897c58c5f 100644 --- a/gems/aws-sdk-apigateway/lib/aws-sdk-apigateway/endpoint_provider.rb +++ b/gems/aws-sdk-apigateway/lib/aws-sdk-apigateway/endpoint_provider.rb @@ -32,7 +32,7 @@ def resolve_endpoint(parameters) raise ArgumentError, "FIPS and DualStack are enabled, but this partition does not support one or both" end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) - if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS")) + if Aws::Endpoints::Matchers.boolean_equals?(Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS"), true) return Aws::Endpoints::Endpoint.new(url: "https://apigateway-fips.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {}) end raise ArgumentError, "FIPS is enabled but this partition does not support FIPS" diff --git a/gems/aws-sdk-apigateway/lib/aws-sdk-apigateway/types.rb b/gems/aws-sdk-apigateway/lib/aws-sdk-apigateway/types.rb index 71d6c67ffcc..ad9f04af8e1 100644 --- a/gems/aws-sdk-apigateway/lib/aws-sdk-apigateway/types.rb +++ b/gems/aws-sdk-apigateway/lib/aws-sdk-apigateway/types.rb @@ -890,7 +890,8 @@ class CreateDomainNameRequest < Struct.new( # # @!attribute [rw] schema # The schema for the model. For `application/json` models, this should - # be JSON schema draft 4 model. + # be JSON schema draft 4 model. The maximum size of the model is 400 + # KB. 
# @return [String] # # @!attribute [rw] content_type @@ -4735,7 +4736,9 @@ class ServiceUnavailableException < Struct.new( # @return [String] # # @!attribute [rw] cache_cluster_enabled - # Specifies whether a cache cluster is enabled for the stage. + # Specifies whether a cache cluster is enabled for the stage. To + # activate a method-level cache, set `CachingEnabled` to `true` for a + # method. # @return [Boolean] # # @!attribute [rw] cache_cluster_size diff --git a/gems/aws-sdk-drs/CHANGELOG.md b/gems/aws-sdk-drs/CHANGELOG.md index c9cd4f708a2..7c5b40b9e75 100644 --- a/gems/aws-sdk-drs/CHANGELOG.md +++ b/gems/aws-sdk-drs/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.29.0 (2024-02-26) +------------------ + +* Feature - Added volume status to DescribeSourceServer replicated volumes. + 1.28.0 (2024-01-26) ------------------ diff --git a/gems/aws-sdk-drs/VERSION b/gems/aws-sdk-drs/VERSION index cfc730712d5..5e57fb89558 100644 --- a/gems/aws-sdk-drs/VERSION +++ b/gems/aws-sdk-drs/VERSION @@ -1 +1 @@ -1.28.0 +1.29.0 diff --git a/gems/aws-sdk-drs/lib/aws-sdk-drs.rb b/gems/aws-sdk-drs/lib/aws-sdk-drs.rb index f28145c9105..e6d3ba5c89e 100644 --- a/gems/aws-sdk-drs/lib/aws-sdk-drs.rb +++ b/gems/aws-sdk-drs/lib/aws-sdk-drs.rb @@ -52,6 +52,6 @@ # @!group service module Aws::Drs - GEM_VERSION = '1.28.0' + GEM_VERSION = '1.29.0' end diff --git a/gems/aws-sdk-drs/lib/aws-sdk-drs/client.rb b/gems/aws-sdk-drs/lib/aws-sdk-drs/client.rb index 235528de816..9a6f52f496d 100644 --- a/gems/aws-sdk-drs/lib/aws-sdk-drs/client.rb +++ b/gems/aws-sdk-drs/lib/aws-sdk-drs/client.rb @@ -498,6 +498,7 @@ def associate_source_network_stack(params = {}, options = {}) # resp.source_server.data_replication_info.replicated_disks[0].replicated_storage_bytes #=> Integer # resp.source_server.data_replication_info.replicated_disks[0].rescanned_storage_bytes #=> Integer # resp.source_server.data_replication_info.replicated_disks[0].total_storage_bytes #=> Integer + # 
resp.source_server.data_replication_info.replicated_disks[0].volume_status #=> String, one of "REGULAR", "CONTAINS_MARKETPLACE_PRODUCT_CODES", "MISSING_VOLUME_ATTRIBUTES", "MISSING_VOLUME_ATTRIBUTES_AND_PRECHECK_UNAVAILABLE" # resp.source_server.data_replication_info.staging_availability_zone #=> String # resp.source_server.last_launch_result #=> String, one of "NOT_STARTED", "PENDING", "SUCCEEDED", "FAILED" # resp.source_server.life_cycle.added_to_service_date_time #=> String @@ -1013,6 +1014,10 @@ def delete_source_server(params = {}, options = {}) # resp.items[0].event_data.conversion_properties.volume_to_conversion_map #=> Hash # resp.items[0].event_data.conversion_properties.volume_to_conversion_map["LargeBoundedString"] #=> Hash # resp.items[0].event_data.conversion_properties.volume_to_conversion_map["LargeBoundedString"]["EbsSnapshot"] #=> String + # resp.items[0].event_data.conversion_properties.volume_to_product_codes #=> Hash + # resp.items[0].event_data.conversion_properties.volume_to_product_codes["LargeBoundedString"] #=> Array + # resp.items[0].event_data.conversion_properties.volume_to_product_codes["LargeBoundedString"][0].product_code_id #=> String + # resp.items[0].event_data.conversion_properties.volume_to_product_codes["LargeBoundedString"][0].product_code_mode #=> String, one of "ENABLED", "DISABLED" # resp.items[0].event_data.conversion_properties.volume_to_volume_size #=> Hash # resp.items[0].event_data.conversion_properties.volume_to_volume_size["LargeBoundedString"] #=> Integer # resp.items[0].event_data.conversion_server_id #=> String @@ -1508,6 +1513,7 @@ def describe_source_networks(params = {}, options = {}) # resp.items[0].data_replication_info.replicated_disks[0].replicated_storage_bytes #=> Integer # resp.items[0].data_replication_info.replicated_disks[0].rescanned_storage_bytes #=> Integer # resp.items[0].data_replication_info.replicated_disks[0].total_storage_bytes #=> Integer + # 
resp.items[0].data_replication_info.replicated_disks[0].volume_status #=> String, one of "REGULAR", "CONTAINS_MARKETPLACE_PRODUCT_CODES", "MISSING_VOLUME_ATTRIBUTES", "MISSING_VOLUME_ATTRIBUTES_AND_PRECHECK_UNAVAILABLE" # resp.items[0].data_replication_info.staging_availability_zone #=> String # resp.items[0].last_launch_result #=> String, one of "NOT_STARTED", "PENDING", "SUCCEEDED", "FAILED" # resp.items[0].life_cycle.added_to_service_date_time #=> String @@ -1658,6 +1664,7 @@ def disconnect_recovery_instance(params = {}, options = {}) # resp.data_replication_info.replicated_disks[0].replicated_storage_bytes #=> Integer # resp.data_replication_info.replicated_disks[0].rescanned_storage_bytes #=> Integer # resp.data_replication_info.replicated_disks[0].total_storage_bytes #=> Integer + # resp.data_replication_info.replicated_disks[0].volume_status #=> String, one of "REGULAR", "CONTAINS_MARKETPLACE_PRODUCT_CODES", "MISSING_VOLUME_ATTRIBUTES", "MISSING_VOLUME_ATTRIBUTES_AND_PRECHECK_UNAVAILABLE" # resp.data_replication_info.staging_availability_zone #=> String # resp.last_launch_result #=> String, one of "NOT_STARTED", "PENDING", "SUCCEEDED", "FAILED" # resp.life_cycle.added_to_service_date_time #=> String @@ -2234,6 +2241,7 @@ def put_launch_action(params = {}, options = {}) # resp.data_replication_info.replicated_disks[0].replicated_storage_bytes #=> Integer # resp.data_replication_info.replicated_disks[0].rescanned_storage_bytes #=> Integer # resp.data_replication_info.replicated_disks[0].total_storage_bytes #=> Integer + # resp.data_replication_info.replicated_disks[0].volume_status #=> String, one of "REGULAR", "CONTAINS_MARKETPLACE_PRODUCT_CODES", "MISSING_VOLUME_ATTRIBUTES", "MISSING_VOLUME_ATTRIBUTES_AND_PRECHECK_UNAVAILABLE" # resp.data_replication_info.staging_availability_zone #=> String # resp.last_launch_result #=> String, one of "NOT_STARTED", "PENDING", "SUCCEEDED", "FAILED" # resp.life_cycle.added_to_service_date_time #=> String @@ -2506,6 +2514,7 
@@ def start_recovery(params = {}, options = {}) # resp.source_server.data_replication_info.replicated_disks[0].replicated_storage_bytes #=> Integer # resp.source_server.data_replication_info.replicated_disks[0].rescanned_storage_bytes #=> Integer # resp.source_server.data_replication_info.replicated_disks[0].total_storage_bytes #=> Integer + # resp.source_server.data_replication_info.replicated_disks[0].volume_status #=> String, one of "REGULAR", "CONTAINS_MARKETPLACE_PRODUCT_CODES", "MISSING_VOLUME_ATTRIBUTES", "MISSING_VOLUME_ATTRIBUTES_AND_PRECHECK_UNAVAILABLE" # resp.source_server.data_replication_info.staging_availability_zone #=> String # resp.source_server.last_launch_result #=> String, one of "NOT_STARTED", "PENDING", "SUCCEEDED", "FAILED" # resp.source_server.life_cycle.added_to_service_date_time #=> String @@ -2742,6 +2751,7 @@ def stop_failback(params = {}, options = {}) # resp.source_server.data_replication_info.replicated_disks[0].replicated_storage_bytes #=> Integer # resp.source_server.data_replication_info.replicated_disks[0].rescanned_storage_bytes #=> Integer # resp.source_server.data_replication_info.replicated_disks[0].total_storage_bytes #=> Integer + # resp.source_server.data_replication_info.replicated_disks[0].volume_status #=> String, one of "REGULAR", "CONTAINS_MARKETPLACE_PRODUCT_CODES", "MISSING_VOLUME_ATTRIBUTES", "MISSING_VOLUME_ATTRIBUTES_AND_PRECHECK_UNAVAILABLE" # resp.source_server.data_replication_info.staging_availability_zone #=> String # resp.source_server.last_launch_result #=> String, one of "NOT_STARTED", "PENDING", "SUCCEEDED", "FAILED" # resp.source_server.life_cycle.added_to_service_date_time #=> String @@ -3481,7 +3491,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-drs' - context[:gem_version] = '1.28.0' + context[:gem_version] = '1.29.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-drs/lib/aws-sdk-drs/client_api.rb 
b/gems/aws-sdk-drs/lib/aws-sdk-drs/client_api.rb index 9d904d2e1e7..867f6e39459 100644 --- a/gems/aws-sdk-drs/lib/aws-sdk-drs/client_api.rb +++ b/gems/aws-sdk-drs/lib/aws-sdk-drs/client_api.rb @@ -189,6 +189,10 @@ module ClientApi ParticipatingServer = Shapes::StructureShape.new(name: 'ParticipatingServer') ParticipatingServers = Shapes::ListShape.new(name: 'ParticipatingServers') PositiveInteger = Shapes::IntegerShape.new(name: 'PositiveInteger') + ProductCode = Shapes::StructureShape.new(name: 'ProductCode') + ProductCodeId = Shapes::StringShape.new(name: 'ProductCodeId') + ProductCodeMode = Shapes::StringShape.new(name: 'ProductCodeMode') + ProductCodes = Shapes::ListShape.new(name: 'ProductCodes') PutLaunchActionRequest = Shapes::StructureShape.new(name: 'PutLaunchActionRequest') PutLaunchActionResponse = Shapes::StructureShape.new(name: 'PutLaunchActionResponse') RecoveryInstance = Shapes::StructureShape.new(name: 'RecoveryInstance') @@ -296,7 +300,9 @@ module ClientApi ValidationExceptionField = Shapes::StructureShape.new(name: 'ValidationExceptionField') ValidationExceptionFieldList = Shapes::ListShape.new(name: 'ValidationExceptionFieldList') ValidationExceptionReason = Shapes::StringShape.new(name: 'ValidationExceptionReason') + VolumeStatus = Shapes::StringShape.new(name: 'VolumeStatus') VolumeToConversionMap = Shapes::MapShape.new(name: 'VolumeToConversionMap') + VolumeToProductCodes = Shapes::MapShape.new(name: 'VolumeToProductCodes') VolumeToSizeMap = Shapes::MapShape.new(name: 'VolumeToSizeMap') VpcID = Shapes::StringShape.new(name: 'VpcID') @@ -335,6 +341,7 @@ module ClientApi ConversionProperties.add_member(:force_uefi, Shapes::ShapeRef.new(shape: Boolean, location_name: "forceUefi")) ConversionProperties.add_member(:root_volume_name, Shapes::ShapeRef.new(shape: LargeBoundedString, location_name: "rootVolumeName")) ConversionProperties.add_member(:volume_to_conversion_map, Shapes::ShapeRef.new(shape: VolumeToConversionMap, location_name: 
"volumeToConversionMap")) + ConversionProperties.add_member(:volume_to_product_codes, Shapes::ShapeRef.new(shape: VolumeToProductCodes, location_name: "volumeToProductCodes")) ConversionProperties.add_member(:volume_to_volume_size, Shapes::ShapeRef.new(shape: VolumeToSizeMap, location_name: "volumeToVolumeSize")) ConversionProperties.struct_class = Types::ConversionProperties @@ -405,6 +412,7 @@ module ClientApi DataReplicationInfoReplicatedDisk.add_member(:replicated_storage_bytes, Shapes::ShapeRef.new(shape: PositiveInteger, location_name: "replicatedStorageBytes")) DataReplicationInfoReplicatedDisk.add_member(:rescanned_storage_bytes, Shapes::ShapeRef.new(shape: PositiveInteger, location_name: "rescannedStorageBytes")) DataReplicationInfoReplicatedDisk.add_member(:total_storage_bytes, Shapes::ShapeRef.new(shape: PositiveInteger, location_name: "totalStorageBytes")) + DataReplicationInfoReplicatedDisk.add_member(:volume_status, Shapes::ShapeRef.new(shape: VolumeStatus, location_name: "volumeStatus")) DataReplicationInfoReplicatedDisk.struct_class = Types::DataReplicationInfoReplicatedDisk DataReplicationInfoReplicatedDisks.member = Shapes::ShapeRef.new(shape: DataReplicationInfoReplicatedDisk) @@ -807,6 +815,12 @@ module ClientApi ParticipatingServers.member = Shapes::ShapeRef.new(shape: ParticipatingServer) + ProductCode.add_member(:product_code_id, Shapes::ShapeRef.new(shape: ProductCodeId, location_name: "productCodeId")) + ProductCode.add_member(:product_code_mode, Shapes::ShapeRef.new(shape: ProductCodeMode, location_name: "productCodeMode")) + ProductCode.struct_class = Types::ProductCode + + ProductCodes.member = Shapes::ShapeRef.new(shape: ProductCode) + PutLaunchActionRequest.add_member(:action_code, Shapes::ShapeRef.new(shape: SsmDocumentName, required: true, location_name: "actionCode")) PutLaunchActionRequest.add_member(:action_id, Shapes::ShapeRef.new(shape: LaunchActionId, required: true, location_name: "actionId")) 
PutLaunchActionRequest.add_member(:action_version, Shapes::ShapeRef.new(shape: LaunchActionVersion, required: true, location_name: "actionVersion")) @@ -1252,6 +1266,9 @@ module ClientApi VolumeToConversionMap.key = Shapes::ShapeRef.new(shape: LargeBoundedString) VolumeToConversionMap.value = Shapes::ShapeRef.new(shape: ConversionMap) + VolumeToProductCodes.key = Shapes::ShapeRef.new(shape: LargeBoundedString) + VolumeToProductCodes.value = Shapes::ShapeRef.new(shape: ProductCodes) + VolumeToSizeMap.key = Shapes::ShapeRef.new(shape: LargeBoundedString) VolumeToSizeMap.value = Shapes::ShapeRef.new(shape: PositiveInteger) diff --git a/gems/aws-sdk-drs/lib/aws-sdk-drs/types.rb b/gems/aws-sdk-drs/lib/aws-sdk-drs/types.rb index edfd7b81f6d..829f1c2db0d 100644 --- a/gems/aws-sdk-drs/lib/aws-sdk-drs/types.rb +++ b/gems/aws-sdk-drs/lib/aws-sdk-drs/types.rb @@ -136,6 +136,11 @@ class ConflictException < Struct.new( # snapshot ids # @return [Hash>] # + # @!attribute [rw] volume_to_product_codes + # A mapping between the volumes being converted and the product codes + # associated with them + # @return [Hash>] + # # @!attribute [rw] volume_to_volume_size # A mapping between the volumes and their sizes # @return [Hash] @@ -147,6 +152,7 @@ class ConversionProperties < Struct.new( :force_uefi, :root_volume_name, :volume_to_conversion_map, + :volume_to_product_codes, :volume_to_volume_size) SENSITIVE = [] include Aws::Structure @@ -463,6 +469,10 @@ class DataReplicationInfo < Struct.new( # The total amount of data to be replicated in bytes. # @return [Integer] # + # @!attribute [rw] volume_status + # The status of the volume. 
+ # @return [String] + # # @see http://docs.aws.amazon.com/goto/WebAPI/drs-2020-02-26/DataReplicationInfoReplicatedDisk AWS API Documentation # class DataReplicationInfoReplicatedDisk < Struct.new( @@ -470,7 +480,8 @@ class DataReplicationInfoReplicatedDisk < Struct.new( :device_name, :replicated_storage_bytes, :rescanned_storage_bytes, - :total_storage_bytes) + :total_storage_bytes, + :volume_status) SENSITIVE = [] include Aws::Structure end @@ -2089,6 +2100,25 @@ class ParticipatingServer < Struct.new( include Aws::Structure end + # Properties of a product code associated with a volume. + # + # @!attribute [rw] product_code_id + # Id of a product code associated with a volume. + # @return [String] + # + # @!attribute [rw] product_code_mode + # Mode of a product code associated with a volume. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/drs-2020-02-26/ProductCode AWS API Documentation + # + class ProductCode < Struct.new( + :product_code_id, + :product_code_mode) + SENSITIVE = [] + include Aws::Structure + end + # @!attribute [rw] action_code # Launch action code. 
# @return [String] diff --git a/gems/aws-sdk-drs/sig/types.rbs b/gems/aws-sdk-drs/sig/types.rbs index 614fc56bcf9..c0fb6ca56bd 100644 --- a/gems/aws-sdk-drs/sig/types.rbs +++ b/gems/aws-sdk-drs/sig/types.rbs @@ -49,6 +49,7 @@ module Aws::Drs attr_accessor force_uefi: bool attr_accessor root_volume_name: ::String attr_accessor volume_to_conversion_map: ::Hash[::String, ::Hash[::String, ::String]] + attr_accessor volume_to_product_codes: ::Hash[::String, ::Array[Types::ProductCode]] attr_accessor volume_to_volume_size: ::Hash[::String, ::Integer] SENSITIVE: [] end @@ -137,6 +138,7 @@ module Aws::Drs attr_accessor replicated_storage_bytes: ::Integer attr_accessor rescanned_storage_bytes: ::Integer attr_accessor total_storage_bytes: ::Integer + attr_accessor volume_status: ("REGULAR" | "CONTAINS_MARKETPLACE_PRODUCT_CODES" | "MISSING_VOLUME_ATTRIBUTES" | "MISSING_VOLUME_ATTRIBUTES_AND_PRECHECK_UNAVAILABLE") SENSITIVE: [] end @@ -653,6 +655,12 @@ module Aws::Drs SENSITIVE: [] end + class ProductCode + attr_accessor product_code_id: ::String + attr_accessor product_code_mode: ("ENABLED" | "DISABLED") + SENSITIVE: [] + end + class PutLaunchActionRequest attr_accessor action_code: ::String attr_accessor action_id: ::String diff --git a/gems/aws-sdk-kafkaconnect/CHANGELOG.md b/gems/aws-sdk-kafkaconnect/CHANGELOG.md index ee6fbb9288b..7de96079989 100644 --- a/gems/aws-sdk-kafkaconnect/CHANGELOG.md +++ b/gems/aws-sdk-kafkaconnect/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.20.0 (2024-02-26) +------------------ + +* Feature - Adds support for tagging, with new TagResource, UntagResource and ListTagsForResource APIs to manage tags and updates to existing APIs to allow tag on create. This release also adds support for the new DeleteWorkerConfiguration API. 
+ 1.19.0 (2024-01-26) ------------------ diff --git a/gems/aws-sdk-kafkaconnect/VERSION b/gems/aws-sdk-kafkaconnect/VERSION index 815d5ca06d5..39893559155 100644 --- a/gems/aws-sdk-kafkaconnect/VERSION +++ b/gems/aws-sdk-kafkaconnect/VERSION @@ -1 +1 @@ -1.19.0 +1.20.0 diff --git a/gems/aws-sdk-kafkaconnect/lib/aws-sdk-kafkaconnect.rb b/gems/aws-sdk-kafkaconnect/lib/aws-sdk-kafkaconnect.rb index 1131edd3abd..f27328953d3 100644 --- a/gems/aws-sdk-kafkaconnect/lib/aws-sdk-kafkaconnect.rb +++ b/gems/aws-sdk-kafkaconnect/lib/aws-sdk-kafkaconnect.rb @@ -52,6 +52,6 @@ # @!group service module Aws::KafkaConnect - GEM_VERSION = '1.19.0' + GEM_VERSION = '1.20.0' end diff --git a/gems/aws-sdk-kafkaconnect/lib/aws-sdk-kafkaconnect/client.rb b/gems/aws-sdk-kafkaconnect/lib/aws-sdk-kafkaconnect/client.rb index 36ad8c9f4cf..1af5f2f1186 100644 --- a/gems/aws-sdk-kafkaconnect/lib/aws-sdk-kafkaconnect/client.rb +++ b/gems/aws-sdk-kafkaconnect/lib/aws-sdk-kafkaconnect/client.rb @@ -421,7 +421,13 @@ def initialize(*args) # Details about log delivery. # # @option params [required, Array] :plugins - # Specifies which plugins to use for the connector. + # Amazon MSK Connect does not currently support specifying multiple + # plugins as a list. To use more than one plugin for your connector, you + # can create a single custom plugin using a ZIP file that bundles + # multiple plugins together. + # + # Specifies which plugin to use for the connector. You must specify a + # single-element list containing one `customPlugin` object. # # @option params [required, String] :service_execution_role_arn # The Amazon Resource Name (ARN) of the IAM role used by the connector @@ -430,6 +436,9 @@ def initialize(*args) # connector that has Amazon S3 as a destination must have permissions # that allow it to write to the S3 destination bucket. # + # @option params [Hash] :tags + # The tags you want to attach to the connector. 
+ # # @option params [Types::WorkerConfiguration] :worker_configuration # Specifies which worker configuration to use with the connector. # @@ -506,6 +515,9 @@ def initialize(*args) # }, # ], # service_execution_role_arn: "__string", # required + # tags: { + # "TagKey" => "TagValue", + # }, # worker_configuration: { # revision: 1, # required # worker_configuration_arn: "__string", # required @@ -541,6 +553,9 @@ def create_connector(params = {}, options = {}) # @option params [required, String] :name # The name of the custom plugin. # + # @option params [Hash] :tags + # The tags you want to attach to the custom plugin. + # # @return [Types::CreateCustomPluginResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::CreateCustomPluginResponse#custom_plugin_arn #custom_plugin_arn} => String @@ -561,6 +576,9 @@ def create_connector(params = {}, options = {}) # }, # }, # name: "__stringMin1Max128", # required + # tags: { + # "TagKey" => "TagValue", + # }, # }) # # @example Response structure @@ -590,12 +608,16 @@ def create_custom_plugin(params = {}, options = {}) # @option params [required, String] :properties_file_content # Base64 encoded contents of connect-distributed.properties file. # + # @option params [Hash] :tags + # The tags you want to attach to the worker configuration. 
+ # # @return [Types::CreateWorkerConfigurationResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::CreateWorkerConfigurationResponse#creation_time #creation_time} => Time # * {Types::CreateWorkerConfigurationResponse#latest_revision #latest_revision} => Types::WorkerConfigurationRevisionSummary # * {Types::CreateWorkerConfigurationResponse#name #name} => String # * {Types::CreateWorkerConfigurationResponse#worker_configuration_arn #worker_configuration_arn} => String + # * {Types::CreateWorkerConfigurationResponse#worker_configuration_state #worker_configuration_state} => String # # @example Request syntax with placeholder values # @@ -603,6 +625,9 @@ def create_custom_plugin(params = {}, options = {}) # description: "__stringMax1024", # name: "__stringMin1Max128", # required # properties_file_content: "__sensitiveString", # required + # tags: { + # "TagKey" => "TagValue", + # }, # }) # # @example Response structure @@ -613,6 +638,7 @@ def create_custom_plugin(params = {}, options = {}) # resp.latest_revision.revision #=> Integer # resp.name #=> String # resp.worker_configuration_arn #=> String + # resp.worker_configuration_state #=> String, one of "ACTIVE", "DELETING" # # @see http://docs.aws.amazon.com/goto/WebAPI/kafkaconnect-2021-09-14/CreateWorkerConfiguration AWS API Documentation # @@ -689,6 +715,37 @@ def delete_custom_plugin(params = {}, options = {}) req.send_request(options) end + # Deletes the specified worker configuration. + # + # @option params [required, String] :worker_configuration_arn + # The Amazon Resource Name (ARN) of the worker configuration that you + # want to delete. 
+ # + # @return [Types::DeleteWorkerConfigurationResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::DeleteWorkerConfigurationResponse#worker_configuration_arn #worker_configuration_arn} => String + # * {Types::DeleteWorkerConfigurationResponse#worker_configuration_state #worker_configuration_state} => String + # + # @example Request syntax with placeholder values + # + # resp = client.delete_worker_configuration({ + # worker_configuration_arn: "__string", # required + # }) + # + # @example Response structure + # + # resp.worker_configuration_arn #=> String + # resp.worker_configuration_state #=> String, one of "ACTIVE", "DELETING" + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kafkaconnect-2021-09-14/DeleteWorkerConfiguration AWS API Documentation + # + # @overload delete_worker_configuration(params = {}) + # @param [Hash] params ({}) + def delete_worker_configuration(params = {}, options = {}) + req = build_request(:delete_worker_configuration, params) + req.send_request(options) + end + # Returns summary information about the connector. 
# # @option params [required, String] :connector_arn @@ -833,6 +890,7 @@ def describe_custom_plugin(params = {}, options = {}) # * {Types::DescribeWorkerConfigurationResponse#latest_revision #latest_revision} => Types::WorkerConfigurationRevisionDescription # * {Types::DescribeWorkerConfigurationResponse#name #name} => String # * {Types::DescribeWorkerConfigurationResponse#worker_configuration_arn #worker_configuration_arn} => String + # * {Types::DescribeWorkerConfigurationResponse#worker_configuration_state #worker_configuration_state} => String # # @example Request syntax with placeholder values # @@ -850,6 +908,7 @@ def describe_custom_plugin(params = {}, options = {}) # resp.latest_revision.revision #=> Integer # resp.name #=> String # resp.worker_configuration_arn #=> String + # resp.worker_configuration_state #=> String, one of "ACTIVE", "DELETING" # # @see http://docs.aws.amazon.com/goto/WebAPI/kafkaconnect-2021-09-14/DescribeWorkerConfiguration AWS API Documentation # @@ -946,6 +1005,9 @@ def list_connectors(params = {}, options = {}) # @option params [Integer] :max_results # The maximum number of custom plugins to list in one response. # + # @option params [String] :name_prefix + # Lists custom plugin names that start with the specified text string. + # # @option params [String] :next_token # If the response of a ListCustomPlugins operation is truncated, it will # include a NextToken. Send this NextToken in a subsequent request to @@ -962,6 +1024,7 @@ def list_connectors(params = {}, options = {}) # # resp = client.list_custom_plugins({ # max_results: 1, + # name_prefix: "__string", # next_token: "__string", # }) # @@ -993,12 +1056,46 @@ def list_custom_plugins(params = {}, options = {}) req.send_request(options) end + # Lists all the tags attached to the specified resource. + # + # @option params [required, String] :resource_arn + # The Amazon Resource Name (ARN) of the resource for which you want to + # list all attached tags. 
+ # + # @return [Types::ListTagsForResourceResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::ListTagsForResourceResponse#tags #tags} => Hash<String,String> + # + # @example Request syntax with placeholder values + # + # resp = client.list_tags_for_resource({ + # resource_arn: "__string", # required + # }) + # + # @example Response structure + # + # resp.tags #=> Hash + # resp.tags["TagKey"] #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kafkaconnect-2021-09-14/ListTagsForResource AWS API Documentation + # + # @overload list_tags_for_resource(params = {}) + # @param [Hash] params ({}) + def list_tags_for_resource(params = {}, options = {}) + req = build_request(:list_tags_for_resource, params) + req.send_request(options) + end + # Returns a list of all of the worker configurations in this account and # Region. # # @option params [Integer] :max_results # The maximum number of worker configurations to list in one response. # + # @option params [String] :name_prefix + # Lists worker configuration names that start with the specified text + # string. + # # @option params [String] :next_token # If the response of a ListWorkerConfigurations operation is truncated, # it will include a NextToken. 
Send this NextToken in a subsequent @@ -1016,6 +1113,7 @@ def list_custom_plugins(params = {}, options = {}) # # resp = client.list_worker_configurations({ # max_results: 1, + # name_prefix: "__string", # next_token: "__string", # }) # @@ -1030,6 +1128,7 @@ def list_custom_plugins(params = {}, options = {}) # resp.worker_configurations[0].latest_revision.revision #=> Integer # resp.worker_configurations[0].name #=> String # resp.worker_configurations[0].worker_configuration_arn #=> String + # resp.worker_configurations[0].worker_configuration_state #=> String, one of "ACTIVE", "DELETING" # # @see http://docs.aws.amazon.com/goto/WebAPI/kafkaconnect-2021-09-14/ListWorkerConfigurations AWS API Documentation # @@ -1040,6 +1139,62 @@ def list_worker_configurations(params = {}, options = {}) req.send_request(options) end + # Attaches tags to the specified resource. + # + # @option params [required, String] :resource_arn + # The Amazon Resource Name (ARN) of the resource to which you want to + # attach tags. + # + # @option params [required, Hash] :tags + # The tags that you want to attach to the resource. + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # @example Request syntax with placeholder values + # + # resp = client.tag_resource({ + # resource_arn: "__string", # required + # tags: { # required + # "TagKey" => "TagValue", + # }, + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kafkaconnect-2021-09-14/TagResource AWS API Documentation + # + # @overload tag_resource(params = {}) + # @param [Hash] params ({}) + def tag_resource(params = {}, options = {}) + req = build_request(:tag_resource, params) + req.send_request(options) + end + + # Removes tags from the specified resource. + # + # @option params [required, String] :resource_arn + # The Amazon Resource Name (ARN) of the resource from which you want to + # remove tags. 
+ # + # @option params [required, Array] :tag_keys + # The keys of the tags that you want to remove from the resource. + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # @example Request syntax with placeholder values + # + # resp = client.untag_resource({ + # resource_arn: "__string", # required + # tag_keys: ["TagKey"], # required + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kafkaconnect-2021-09-14/UntagResource AWS API Documentation + # + # @overload untag_resource(params = {}) + # @param [Hash] params ({}) + def untag_resource(params = {}, options = {}) + req = build_request(:untag_resource, params) + req.send_request(options) + end + # Updates the specified connector. # # @option params [required, Types::CapacityUpdate] :capacity @@ -1108,7 +1263,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-kafkaconnect' - context[:gem_version] = '1.19.0' + context[:gem_version] = '1.20.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-kafkaconnect/lib/aws-sdk-kafkaconnect/client_api.rb b/gems/aws-sdk-kafkaconnect/lib/aws-sdk-kafkaconnect/client_api.rb index 4d40ea63f0e..52dbd28a880 100644 --- a/gems/aws-sdk-kafkaconnect/lib/aws-sdk-kafkaconnect/client_api.rb +++ b/gems/aws-sdk-kafkaconnect/lib/aws-sdk-kafkaconnect/client_api.rb @@ -46,6 +46,8 @@ module ClientApi DeleteConnectorResponse = Shapes::StructureShape.new(name: 'DeleteConnectorResponse') DeleteCustomPluginRequest = Shapes::StructureShape.new(name: 'DeleteCustomPluginRequest') DeleteCustomPluginResponse = Shapes::StructureShape.new(name: 'DeleteCustomPluginResponse') + DeleteWorkerConfigurationRequest = Shapes::StructureShape.new(name: 'DeleteWorkerConfigurationRequest') + DeleteWorkerConfigurationResponse = Shapes::StructureShape.new(name: 'DeleteWorkerConfigurationResponse') DescribeConnectorRequest = Shapes::StructureShape.new(name: 'DescribeConnectorRequest') 
DescribeConnectorResponse = Shapes::StructureShape.new(name: 'DescribeConnectorResponse') DescribeCustomPluginRequest = Shapes::StructureShape.new(name: 'DescribeCustomPluginRequest') @@ -68,6 +70,8 @@ module ClientApi ListConnectorsResponse = Shapes::StructureShape.new(name: 'ListConnectorsResponse') ListCustomPluginsRequest = Shapes::StructureShape.new(name: 'ListCustomPluginsRequest') ListCustomPluginsResponse = Shapes::StructureShape.new(name: 'ListCustomPluginsResponse') + ListTagsForResourceRequest = Shapes::StructureShape.new(name: 'ListTagsForResourceRequest') + ListTagsForResourceResponse = Shapes::StructureShape.new(name: 'ListTagsForResourceResponse') ListWorkerConfigurationsRequest = Shapes::StructureShape.new(name: 'ListWorkerConfigurationsRequest') ListWorkerConfigurationsResponse = Shapes::StructureShape.new(name: 'ListWorkerConfigurationsResponse') LogDelivery = Shapes::StructureShape.new(name: 'LogDelivery') @@ -91,8 +95,16 @@ module ClientApi ScaleOutPolicyUpdate = Shapes::StructureShape.new(name: 'ScaleOutPolicyUpdate') ServiceUnavailableException = Shapes::StructureShape.new(name: 'ServiceUnavailableException') StateDescription = Shapes::StructureShape.new(name: 'StateDescription') + TagKey = Shapes::StringShape.new(name: 'TagKey') + TagKeyList = Shapes::ListShape.new(name: 'TagKeyList') + TagResourceRequest = Shapes::StructureShape.new(name: 'TagResourceRequest') + TagResourceResponse = Shapes::StructureShape.new(name: 'TagResourceResponse') + TagValue = Shapes::StringShape.new(name: 'TagValue') + Tags = Shapes::MapShape.new(name: 'Tags') TooManyRequestsException = Shapes::StructureShape.new(name: 'TooManyRequestsException') UnauthorizedException = Shapes::StructureShape.new(name: 'UnauthorizedException') + UntagResourceRequest = Shapes::StructureShape.new(name: 'UntagResourceRequest') + UntagResourceResponse = Shapes::StructureShape.new(name: 'UntagResourceResponse') UpdateConnectorRequest = Shapes::StructureShape.new(name: 
'UpdateConnectorRequest') UpdateConnectorResponse = Shapes::StructureShape.new(name: 'UpdateConnectorResponse') Vpc = Shapes::StructureShape.new(name: 'Vpc') @@ -101,6 +113,7 @@ module ClientApi WorkerConfigurationDescription = Shapes::StructureShape.new(name: 'WorkerConfigurationDescription') WorkerConfigurationRevisionDescription = Shapes::StructureShape.new(name: 'WorkerConfigurationRevisionDescription') WorkerConfigurationRevisionSummary = Shapes::StructureShape.new(name: 'WorkerConfigurationRevisionSummary') + WorkerConfigurationState = Shapes::StringShape.new(name: 'WorkerConfigurationState') WorkerConfigurationSummary = Shapes::StructureShape.new(name: 'WorkerConfigurationSummary') WorkerLogDelivery = Shapes::StructureShape.new(name: 'WorkerLogDelivery') WorkerLogDeliveryDescription = Shapes::StructureShape.new(name: 'WorkerLogDeliveryDescription') @@ -207,6 +220,7 @@ module ClientApi CreateConnectorRequest.add_member(:log_delivery, Shapes::ShapeRef.new(shape: LogDelivery, location_name: "logDelivery")) CreateConnectorRequest.add_member(:plugins, Shapes::ShapeRef.new(shape: __listOfPlugin, required: true, location_name: "plugins")) CreateConnectorRequest.add_member(:service_execution_role_arn, Shapes::ShapeRef.new(shape: __string, required: true, location_name: "serviceExecutionRoleArn")) + CreateConnectorRequest.add_member(:tags, Shapes::ShapeRef.new(shape: Tags, location_name: "tags")) CreateConnectorRequest.add_member(:worker_configuration, Shapes::ShapeRef.new(shape: WorkerConfiguration, location_name: "workerConfiguration")) CreateConnectorRequest.struct_class = Types::CreateConnectorRequest @@ -219,6 +233,7 @@ module ClientApi CreateCustomPluginRequest.add_member(:description, Shapes::ShapeRef.new(shape: __stringMax1024, location_name: "description")) CreateCustomPluginRequest.add_member(:location, Shapes::ShapeRef.new(shape: CustomPluginLocation, required: true, location_name: "location")) CreateCustomPluginRequest.add_member(:name, 
Shapes::ShapeRef.new(shape: __stringMin1Max128, required: true, location_name: "name")) + CreateCustomPluginRequest.add_member(:tags, Shapes::ShapeRef.new(shape: Tags, location_name: "tags")) CreateCustomPluginRequest.struct_class = Types::CreateCustomPluginRequest CreateCustomPluginResponse.add_member(:custom_plugin_arn, Shapes::ShapeRef.new(shape: __string, location_name: "customPluginArn")) @@ -230,12 +245,14 @@ module ClientApi CreateWorkerConfigurationRequest.add_member(:description, Shapes::ShapeRef.new(shape: __stringMax1024, location_name: "description")) CreateWorkerConfigurationRequest.add_member(:name, Shapes::ShapeRef.new(shape: __stringMin1Max128, required: true, location_name: "name")) CreateWorkerConfigurationRequest.add_member(:properties_file_content, Shapes::ShapeRef.new(shape: __sensitiveString, required: true, location_name: "propertiesFileContent")) + CreateWorkerConfigurationRequest.add_member(:tags, Shapes::ShapeRef.new(shape: Tags, location_name: "tags")) CreateWorkerConfigurationRequest.struct_class = Types::CreateWorkerConfigurationRequest CreateWorkerConfigurationResponse.add_member(:creation_time, Shapes::ShapeRef.new(shape: __timestampIso8601, location_name: "creationTime")) CreateWorkerConfigurationResponse.add_member(:latest_revision, Shapes::ShapeRef.new(shape: WorkerConfigurationRevisionSummary, location_name: "latestRevision")) CreateWorkerConfigurationResponse.add_member(:name, Shapes::ShapeRef.new(shape: __string, location_name: "name")) CreateWorkerConfigurationResponse.add_member(:worker_configuration_arn, Shapes::ShapeRef.new(shape: __string, location_name: "workerConfigurationArn")) + CreateWorkerConfigurationResponse.add_member(:worker_configuration_state, Shapes::ShapeRef.new(shape: WorkerConfigurationState, location_name: "workerConfigurationState")) CreateWorkerConfigurationResponse.struct_class = Types::CreateWorkerConfigurationResponse CustomPlugin.add_member(:custom_plugin_arn, Shapes::ShapeRef.new(shape: __string, 
required: true, location_name: "customPluginArn")) @@ -287,6 +304,13 @@ module ClientApi DeleteCustomPluginResponse.add_member(:custom_plugin_state, Shapes::ShapeRef.new(shape: CustomPluginState, location_name: "customPluginState")) DeleteCustomPluginResponse.struct_class = Types::DeleteCustomPluginResponse + DeleteWorkerConfigurationRequest.add_member(:worker_configuration_arn, Shapes::ShapeRef.new(shape: __string, required: true, location: "uri", location_name: "workerConfigurationArn")) + DeleteWorkerConfigurationRequest.struct_class = Types::DeleteWorkerConfigurationRequest + + DeleteWorkerConfigurationResponse.add_member(:worker_configuration_arn, Shapes::ShapeRef.new(shape: __string, location_name: "workerConfigurationArn")) + DeleteWorkerConfigurationResponse.add_member(:worker_configuration_state, Shapes::ShapeRef.new(shape: WorkerConfigurationState, location_name: "workerConfigurationState")) + DeleteWorkerConfigurationResponse.struct_class = Types::DeleteWorkerConfigurationResponse + DescribeConnectorRequest.add_member(:connector_arn, Shapes::ShapeRef.new(shape: __string, required: true, location: "uri", location_name: "connectorArn")) DescribeConnectorRequest.struct_class = Types::DescribeConnectorRequest @@ -329,6 +353,7 @@ module ClientApi DescribeWorkerConfigurationResponse.add_member(:latest_revision, Shapes::ShapeRef.new(shape: WorkerConfigurationRevisionDescription, location_name: "latestRevision")) DescribeWorkerConfigurationResponse.add_member(:name, Shapes::ShapeRef.new(shape: __string, location_name: "name")) DescribeWorkerConfigurationResponse.add_member(:worker_configuration_arn, Shapes::ShapeRef.new(shape: __string, location_name: "workerConfigurationArn")) + DescribeWorkerConfigurationResponse.add_member(:worker_configuration_state, Shapes::ShapeRef.new(shape: WorkerConfigurationState, location_name: "workerConfigurationState")) DescribeWorkerConfigurationResponse.struct_class = Types::DescribeWorkerConfigurationResponse 
FirehoseLogDelivery.add_member(:delivery_stream, Shapes::ShapeRef.new(shape: __string, location_name: "deliveryStream")) @@ -373,6 +398,7 @@ module ClientApi ListConnectorsResponse.struct_class = Types::ListConnectorsResponse ListCustomPluginsRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location: "querystring", location_name: "maxResults")) + ListCustomPluginsRequest.add_member(:name_prefix, Shapes::ShapeRef.new(shape: __string, location: "querystring", location_name: "namePrefix")) ListCustomPluginsRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: __string, location: "querystring", location_name: "nextToken")) ListCustomPluginsRequest.struct_class = Types::ListCustomPluginsRequest @@ -380,7 +406,14 @@ module ClientApi ListCustomPluginsResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: __string, location_name: "nextToken")) ListCustomPluginsResponse.struct_class = Types::ListCustomPluginsResponse + ListTagsForResourceRequest.add_member(:resource_arn, Shapes::ShapeRef.new(shape: __string, required: true, location: "uri", location_name: "resourceArn")) + ListTagsForResourceRequest.struct_class = Types::ListTagsForResourceRequest + + ListTagsForResourceResponse.add_member(:tags, Shapes::ShapeRef.new(shape: Tags, location_name: "tags")) + ListTagsForResourceResponse.struct_class = Types::ListTagsForResourceResponse + ListWorkerConfigurationsRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location: "querystring", location_name: "maxResults")) + ListWorkerConfigurationsRequest.add_member(:name_prefix, Shapes::ShapeRef.new(shape: __string, location: "querystring", location_name: "namePrefix")) ListWorkerConfigurationsRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: __string, location: "querystring", location_name: "nextToken")) ListWorkerConfigurationsRequest.struct_class = Types::ListWorkerConfigurationsRequest @@ -460,12 +493,29 @@ module ClientApi StateDescription.add_member(:message, 
Shapes::ShapeRef.new(shape: __string, location_name: "message")) StateDescription.struct_class = Types::StateDescription + TagKeyList.member = Shapes::ShapeRef.new(shape: TagKey) + + TagResourceRequest.add_member(:resource_arn, Shapes::ShapeRef.new(shape: __string, required: true, location: "uri", location_name: "resourceArn")) + TagResourceRequest.add_member(:tags, Shapes::ShapeRef.new(shape: Tags, required: true, location_name: "tags")) + TagResourceRequest.struct_class = Types::TagResourceRequest + + TagResourceResponse.struct_class = Types::TagResourceResponse + + Tags.key = Shapes::ShapeRef.new(shape: TagKey) + Tags.value = Shapes::ShapeRef.new(shape: TagValue) + TooManyRequestsException.add_member(:message, Shapes::ShapeRef.new(shape: __string, location_name: "message")) TooManyRequestsException.struct_class = Types::TooManyRequestsException UnauthorizedException.add_member(:message, Shapes::ShapeRef.new(shape: __string, location_name: "message")) UnauthorizedException.struct_class = Types::UnauthorizedException + UntagResourceRequest.add_member(:resource_arn, Shapes::ShapeRef.new(shape: __string, required: true, location: "uri", location_name: "resourceArn")) + UntagResourceRequest.add_member(:tag_keys, Shapes::ShapeRef.new(shape: TagKeyList, required: true, location: "querystring", location_name: "tagKeys")) + UntagResourceRequest.struct_class = Types::UntagResourceRequest + + UntagResourceResponse.struct_class = Types::UntagResourceResponse + UpdateConnectorRequest.add_member(:capacity, Shapes::ShapeRef.new(shape: CapacityUpdate, required: true, location_name: "capacity")) UpdateConnectorRequest.add_member(:connector_arn, Shapes::ShapeRef.new(shape: __string, required: true, location: "uri", location_name: "connectorArn")) UpdateConnectorRequest.add_member(:current_version, Shapes::ShapeRef.new(shape: __string, required: true, location: "querystring", location_name: "currentVersion")) @@ -507,6 +557,7 @@ module ClientApi 
WorkerConfigurationSummary.add_member(:latest_revision, Shapes::ShapeRef.new(shape: WorkerConfigurationRevisionSummary, location_name: "latestRevision")) WorkerConfigurationSummary.add_member(:name, Shapes::ShapeRef.new(shape: __string, location_name: "name")) WorkerConfigurationSummary.add_member(:worker_configuration_arn, Shapes::ShapeRef.new(shape: __string, location_name: "workerConfigurationArn")) + WorkerConfigurationSummary.add_member(:worker_configuration_state, Shapes::ShapeRef.new(shape: WorkerConfigurationState, location_name: "workerConfigurationState")) WorkerConfigurationSummary.struct_class = Types::WorkerConfigurationSummary WorkerLogDelivery.add_member(:cloud_watch_logs, Shapes::ShapeRef.new(shape: CloudWatchLogsLogDelivery, location_name: "cloudWatchLogs")) @@ -631,6 +682,21 @@ module ClientApi o.errors << Shapes::ShapeRef.new(shape: InternalServerErrorException) end) + api.add_operation(:delete_worker_configuration, Seahorse::Model::Operation.new.tap do |o| + o.name = "DeleteWorkerConfiguration" + o.http_method = "DELETE" + o.http_request_uri = "/v1/worker-configurations/{workerConfigurationArn}" + o.input = Shapes::ShapeRef.new(shape: DeleteWorkerConfigurationRequest) + o.output = Shapes::ShapeRef.new(shape: DeleteWorkerConfigurationResponse) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: BadRequestException) + o.errors << Shapes::ShapeRef.new(shape: ForbiddenException) + o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException) + o.errors << Shapes::ShapeRef.new(shape: TooManyRequestsException) + o.errors << Shapes::ShapeRef.new(shape: UnauthorizedException) + o.errors << Shapes::ShapeRef.new(shape: InternalServerErrorException) + end) + api.add_operation(:describe_connector, Seahorse::Model::Operation.new.tap do |o| o.name = "DescribeConnector" o.http_method = "GET" @@ -718,6 +784,21 @@ module ClientApi ) end) + api.add_operation(:list_tags_for_resource, 
Seahorse::Model::Operation.new.tap do |o| + o.name = "ListTagsForResource" + o.http_method = "GET" + o.http_request_uri = "/v1/tags/{resourceArn}" + o.input = Shapes::ShapeRef.new(shape: ListTagsForResourceRequest) + o.output = Shapes::ShapeRef.new(shape: ListTagsForResourceResponse) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: BadRequestException) + o.errors << Shapes::ShapeRef.new(shape: ForbiddenException) + o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException) + o.errors << Shapes::ShapeRef.new(shape: TooManyRequestsException) + o.errors << Shapes::ShapeRef.new(shape: UnauthorizedException) + o.errors << Shapes::ShapeRef.new(shape: InternalServerErrorException) + end) + api.add_operation(:list_worker_configurations, Seahorse::Model::Operation.new.tap do |o| o.name = "ListWorkerConfigurations" o.http_method = "GET" @@ -739,6 +820,37 @@ module ClientApi ) end) + api.add_operation(:tag_resource, Seahorse::Model::Operation.new.tap do |o| + o.name = "TagResource" + o.http_method = "POST" + o.http_request_uri = "/v1/tags/{resourceArn}" + o.input = Shapes::ShapeRef.new(shape: TagResourceRequest) + o.output = Shapes::ShapeRef.new(shape: TagResourceResponse) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: ConflictException) + o.errors << Shapes::ShapeRef.new(shape: BadRequestException) + o.errors << Shapes::ShapeRef.new(shape: ForbiddenException) + o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException) + o.errors << Shapes::ShapeRef.new(shape: TooManyRequestsException) + o.errors << Shapes::ShapeRef.new(shape: UnauthorizedException) + o.errors << Shapes::ShapeRef.new(shape: InternalServerErrorException) + end) + + api.add_operation(:untag_resource, Seahorse::Model::Operation.new.tap do |o| + o.name = "UntagResource" + o.http_method = "DELETE" + o.http_request_uri = "/v1/tags/{resourceArn}" + o.input = Shapes::ShapeRef.new(shape: 
UntagResourceRequest) + o.output = Shapes::ShapeRef.new(shape: UntagResourceResponse) + o.errors << Shapes::ShapeRef.new(shape: NotFoundException) + o.errors << Shapes::ShapeRef.new(shape: BadRequestException) + o.errors << Shapes::ShapeRef.new(shape: ForbiddenException) + o.errors << Shapes::ShapeRef.new(shape: ServiceUnavailableException) + o.errors << Shapes::ShapeRef.new(shape: TooManyRequestsException) + o.errors << Shapes::ShapeRef.new(shape: UnauthorizedException) + o.errors << Shapes::ShapeRef.new(shape: InternalServerErrorException) + end) + api.add_operation(:update_connector, Seahorse::Model::Operation.new.tap do |o| o.name = "UpdateConnector" o.http_method = "PUT" diff --git a/gems/aws-sdk-kafkaconnect/lib/aws-sdk-kafkaconnect/endpoint_provider.rb b/gems/aws-sdk-kafkaconnect/lib/aws-sdk-kafkaconnect/endpoint_provider.rb index 9a60bc1ee6b..9e977eabf5f 100644 --- a/gems/aws-sdk-kafkaconnect/lib/aws-sdk-kafkaconnect/endpoint_provider.rb +++ b/gems/aws-sdk-kafkaconnect/lib/aws-sdk-kafkaconnect/endpoint_provider.rb @@ -32,7 +32,7 @@ def resolve_endpoint(parameters) raise ArgumentError, "FIPS and DualStack are enabled, but this partition does not support one or both" end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) - if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS")) + if Aws::Endpoints::Matchers.boolean_equals?(Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS"), true) return Aws::Endpoints::Endpoint.new(url: "https://kafkaconnect-fips.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {}) end raise ArgumentError, "FIPS is enabled but this partition does not support FIPS" diff --git a/gems/aws-sdk-kafkaconnect/lib/aws-sdk-kafkaconnect/endpoints.rb b/gems/aws-sdk-kafkaconnect/lib/aws-sdk-kafkaconnect/endpoints.rb index 21d9a3eb514..379755092ed 100644 --- a/gems/aws-sdk-kafkaconnect/lib/aws-sdk-kafkaconnect/endpoints.rb +++ 
b/gems/aws-sdk-kafkaconnect/lib/aws-sdk-kafkaconnect/endpoints.rb @@ -82,6 +82,20 @@ def self.build(context) end end + class DeleteWorkerConfiguration + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KafkaConnect::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + class DescribeConnector def self.build(context) unless context.config.regional_endpoint @@ -152,6 +166,20 @@ def self.build(context) end end + class ListTagsForResource + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KafkaConnect::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + class ListWorkerConfigurations def self.build(context) unless context.config.regional_endpoint @@ -166,6 +194,34 @@ def self.build(context) end end + class TagResource + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KafkaConnect::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class UntagResource + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::KafkaConnect::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + class UpdateConnector def self.build(context) unless context.config.regional_endpoint diff --git 
a/gems/aws-sdk-kafkaconnect/lib/aws-sdk-kafkaconnect/plugins/endpoints.rb b/gems/aws-sdk-kafkaconnect/lib/aws-sdk-kafkaconnect/plugins/endpoints.rb index 0f2e7c303b9..210e6d1dafd 100644 --- a/gems/aws-sdk-kafkaconnect/lib/aws-sdk-kafkaconnect/plugins/endpoints.rb +++ b/gems/aws-sdk-kafkaconnect/lib/aws-sdk-kafkaconnect/plugins/endpoints.rb @@ -68,6 +68,8 @@ def parameters_for_operation(context) Aws::KafkaConnect::Endpoints::DeleteConnector.build(context) when :delete_custom_plugin Aws::KafkaConnect::Endpoints::DeleteCustomPlugin.build(context) + when :delete_worker_configuration + Aws::KafkaConnect::Endpoints::DeleteWorkerConfiguration.build(context) when :describe_connector Aws::KafkaConnect::Endpoints::DescribeConnector.build(context) when :describe_custom_plugin @@ -78,8 +80,14 @@ def parameters_for_operation(context) Aws::KafkaConnect::Endpoints::ListConnectors.build(context) when :list_custom_plugins Aws::KafkaConnect::Endpoints::ListCustomPlugins.build(context) + when :list_tags_for_resource + Aws::KafkaConnect::Endpoints::ListTagsForResource.build(context) when :list_worker_configurations Aws::KafkaConnect::Endpoints::ListWorkerConfigurations.build(context) + when :tag_resource + Aws::KafkaConnect::Endpoints::TagResource.build(context) + when :untag_resource + Aws::KafkaConnect::Endpoints::UntagResource.build(context) when :update_connector Aws::KafkaConnect::Endpoints::UpdateConnector.build(context) end diff --git a/gems/aws-sdk-kafkaconnect/lib/aws-sdk-kafkaconnect/types.rb b/gems/aws-sdk-kafkaconnect/lib/aws-sdk-kafkaconnect/types.rb index 885b3b3a014..ead2b79b46c 100644 --- a/gems/aws-sdk-kafkaconnect/lib/aws-sdk-kafkaconnect/types.rb +++ b/gems/aws-sdk-kafkaconnect/lib/aws-sdk-kafkaconnect/types.rb @@ -415,7 +415,13 @@ class ConnectorSummary < Struct.new( # @return [Types::LogDelivery] # # @!attribute [rw] plugins - # Specifies which plugins to use for the connector. 
+ # Amazon MSK Connect does not currently support specifying multiple + # plugins as a list. To use more than one plugin for your connector, + # you can create a single custom plugin using a ZIP file that bundles + # multiple plugins together. + # + # Specifies which plugin to use for the connector. You must specify a + # single-element list containing one `customPlugin` object. # @return [Array] # # @!attribute [rw] service_execution_role_arn @@ -426,6 +432,10 @@ class ConnectorSummary < Struct.new( # that allow it to write to the S3 destination bucket. # @return [String] # + # @!attribute [rw] tags + # The tags you want to attach to the connector. + # @return [Hash] + # # @!attribute [rw] worker_configuration # Specifies which worker configuration to use with the connector. # @return [Types::WorkerConfiguration] @@ -444,6 +454,7 @@ class CreateConnectorRequest < Struct.new( :log_delivery, :plugins, :service_execution_role_arn, + :tags, :worker_configuration) SENSITIVE = [:connector_configuration] include Aws::Structure @@ -488,13 +499,18 @@ class CreateConnectorResponse < Struct.new( # The name of the custom plugin. # @return [String] # + # @!attribute [rw] tags + # The tags you want to attach to the custom plugin. + # @return [Hash] + # # @see http://docs.aws.amazon.com/goto/WebAPI/kafkaconnect-2021-09-14/CreateCustomPluginRequest AWS API Documentation # class CreateCustomPluginRequest < Struct.new( :content_type, :description, :location, - :name) + :name, + :tags) SENSITIVE = [] include Aws::Structure end @@ -539,12 +555,17 @@ class CreateCustomPluginResponse < Struct.new( # Base64 encoded contents of connect-distributed.properties file. # @return [String] # + # @!attribute [rw] tags + # The tags you want to attach to the worker configuration. 
+ # @return [Hash] + # # @see http://docs.aws.amazon.com/goto/WebAPI/kafkaconnect-2021-09-14/CreateWorkerConfigurationRequest AWS API Documentation # class CreateWorkerConfigurationRequest < Struct.new( :description, :name, - :properties_file_content) + :properties_file_content, + :tags) SENSITIVE = [:properties_file_content] include Aws::Structure end @@ -566,19 +587,24 @@ class CreateWorkerConfigurationRequest < Struct.new( # configuration. # @return [String] # + # @!attribute [rw] worker_configuration_state + # The state of the worker configuration. + # @return [String] + # # @see http://docs.aws.amazon.com/goto/WebAPI/kafkaconnect-2021-09-14/CreateWorkerConfigurationResponse AWS API Documentation # class CreateWorkerConfigurationResponse < Struct.new( :creation_time, :latest_revision, :name, - :worker_configuration_arn) + :worker_configuration_arn, + :worker_configuration_state) SENSITIVE = [] include Aws::Structure end - # A plugin is an AWS resource that contains the code that defines a - # connector's logic. + # A plugin is an Amazon Web Services resource that contains the code + # that defines a connector's logic. # # @!attribute [rw] custom_plugin_arn # The Amazon Resource Name (ARN) of the custom plugin. @@ -812,6 +838,37 @@ class DeleteCustomPluginResponse < Struct.new( include Aws::Structure end + # @!attribute [rw] worker_configuration_arn + # The Amazon Resource Name (ARN) of the worker configuration that you + # want to delete. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kafkaconnect-2021-09-14/DeleteWorkerConfigurationRequest AWS API Documentation + # + class DeleteWorkerConfigurationRequest < Struct.new( + :worker_configuration_arn) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] worker_configuration_arn + # The Amazon Resource Name (ARN) of the worker configuration that you + # requested to delete. 
+ # @return [String] + # + # @!attribute [rw] worker_configuration_state + # The state of the worker configuration. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kafkaconnect-2021-09-14/DeleteWorkerConfigurationResponse AWS API Documentation + # + class DeleteWorkerConfigurationResponse < Struct.new( + :worker_configuration_arn, + :worker_configuration_state) + SENSITIVE = [] + include Aws::Structure + end + # @!attribute [rw] connector_arn # The Amazon Resource Name (ARN) of the connector that you want to # describe. @@ -1012,6 +1069,10 @@ class DescribeWorkerConfigurationRequest < Struct.new( # The Amazon Resource Name (ARN) of the custom configuration. # @return [String] # + # @!attribute [rw] worker_configuration_state + # The state of the worker configuration. + # @return [String] + # # @see http://docs.aws.amazon.com/goto/WebAPI/kafkaconnect-2021-09-14/DescribeWorkerConfigurationResponse AWS API Documentation # class DescribeWorkerConfigurationResponse < Struct.new( @@ -1019,7 +1080,8 @@ class DescribeWorkerConfigurationResponse < Struct.new( :description, :latest_revision, :name, - :worker_configuration_arn) + :worker_configuration_arn, + :worker_configuration_state) SENSITIVE = [] include Aws::Structure end @@ -1235,6 +1297,10 @@ class ListConnectorsResponse < Struct.new( # The maximum number of custom plugins to list in one response. # @return [Integer] # + # @!attribute [rw] name_prefix + # Lists custom plugin names that start with the specified text string. + # @return [String] + # # @!attribute [rw] next_token # If the response of a ListCustomPlugins operation is truncated, it # will include a NextToken. 
Send this NextToken in a subsequent @@ -1246,6 +1312,7 @@ class ListConnectorsResponse < Struct.new( # class ListCustomPluginsRequest < Struct.new( :max_results, + :name_prefix, :next_token) SENSITIVE = [] include Aws::Structure @@ -1271,10 +1338,41 @@ class ListCustomPluginsResponse < Struct.new( include Aws::Structure end + # @!attribute [rw] resource_arn + # The Amazon Resource Name (ARN) of the resource for which you want to + # list all attached tags. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kafkaconnect-2021-09-14/ListTagsForResourceRequest AWS API Documentation + # + class ListTagsForResourceRequest < Struct.new( + :resource_arn) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] tags + # Lists the tags attached to the specified resource in the + # corresponding request. + # @return [Hash] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kafkaconnect-2021-09-14/ListTagsForResourceResponse AWS API Documentation + # + class ListTagsForResourceResponse < Struct.new( + :tags) + SENSITIVE = [] + include Aws::Structure + end + # @!attribute [rw] max_results # The maximum number of worker configurations to list in one response. # @return [Integer] # + # @!attribute [rw] name_prefix + # Lists worker configuration names that start with the specified text + # string. + # @return [String] + # # @!attribute [rw] next_token # If the response of a ListWorkerConfigurations operation is # truncated, it will include a NextToken. Send this NextToken in a @@ -1286,6 +1384,7 @@ class ListCustomPluginsResponse < Struct.new( # class ListWorkerConfigurationsRequest < Struct.new( :max_results, + :name_prefix, :next_token) SENSITIVE = [] include Aws::Structure @@ -1355,8 +1454,8 @@ class NotFoundException < Struct.new( include Aws::Structure end - # A plugin is an AWS resource that contains the code that defines your - # connector logic. 
+ # A plugin is an Amazon Web Services resource that contains the code + # that defines your connector logic. # # @!attribute [rw] custom_plugin # Details about a custom plugin. @@ -1665,6 +1764,28 @@ class StateDescription < Struct.new( include Aws::Structure end + # @!attribute [rw] resource_arn + # The Amazon Resource Name (ARN) of the resource to which you want to + # attach tags. + # @return [String] + # + # @!attribute [rw] tags + # The tags that you want to attach to the resource. + # @return [Hash] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kafkaconnect-2021-09-14/TagResourceRequest AWS API Documentation + # + class TagResourceRequest < Struct.new( + :resource_arn, + :tags) + SENSITIVE = [] + include Aws::Structure + end + + # @see http://docs.aws.amazon.com/goto/WebAPI/kafkaconnect-2021-09-14/TagResourceResponse AWS API Documentation + # + class TagResourceResponse < Aws::EmptyStructure; end + # HTTP Status Code 429: Limit exceeded. Resource limit reached. # # @!attribute [rw] message @@ -1692,6 +1813,28 @@ class UnauthorizedException < Struct.new( include Aws::Structure end + # @!attribute [rw] resource_arn + # The Amazon Resource Name (ARN) of the resource from which you want + # to remove tags. + # @return [String] + # + # @!attribute [rw] tag_keys + # The keys of the tags that you want to remove from the resource. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/kafkaconnect-2021-09-14/UntagResourceRequest AWS API Documentation + # + class UntagResourceRequest < Struct.new( + :resource_arn, + :tag_keys) + SENSITIVE = [] + include Aws::Structure + end + + # @see http://docs.aws.amazon.com/goto/WebAPI/kafkaconnect-2021-09-14/UntagResourceResponse AWS API Documentation + # + class UntagResourceResponse < Aws::EmptyStructure; end + # @!attribute [rw] capacity # The target capacity. 
# @return [Types::CapacityUpdate] @@ -1884,6 +2027,10 @@ class WorkerConfigurationRevisionSummary < Struct.new( # The Amazon Resource Name (ARN) of the worker configuration. # @return [String] # + # @!attribute [rw] worker_configuration_state + # The state of the worker configuration. + # @return [String] + # # @see http://docs.aws.amazon.com/goto/WebAPI/kafkaconnect-2021-09-14/WorkerConfigurationSummary AWS API Documentation # class WorkerConfigurationSummary < Struct.new( @@ -1891,7 +2038,8 @@ class WorkerConfigurationSummary < Struct.new( :description, :latest_revision, :name, - :worker_configuration_arn) + :worker_configuration_arn, + :worker_configuration_state) SENSITIVE = [] include Aws::Structure end diff --git a/gems/aws-sdk-kafkaconnect/sig/client.rbs b/gems/aws-sdk-kafkaconnect/sig/client.rbs index d03468a15c7..6a065572842 100644 --- a/gems/aws-sdk-kafkaconnect/sig/client.rbs +++ b/gems/aws-sdk-kafkaconnect/sig/client.rbs @@ -142,6 +142,7 @@ module Aws }, ], service_execution_role_arn: ::String, + ?tags: Hash[::String, ::String], ?worker_configuration: { revision: ::Integer, worker_configuration_arn: ::String @@ -167,7 +168,8 @@ module Aws object_version: ::String? 
} }, - name: ::String + name: ::String, + ?tags: Hash[::String, ::String] ) -> _CreateCustomPluginResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _CreateCustomPluginResponseSuccess @@ -177,12 +179,14 @@ module Aws def latest_revision: () -> Types::WorkerConfigurationRevisionSummary def name: () -> ::String def worker_configuration_arn: () -> ::String + def worker_configuration_state: () -> ("ACTIVE" | "DELETING") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/KafkaConnect/Client.html#create_worker_configuration-instance_method def create_worker_configuration: ( ?description: ::String, name: ::String, - properties_file_content: ::String + properties_file_content: ::String, + ?tags: Hash[::String, ::String] ) -> _CreateWorkerConfigurationResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _CreateWorkerConfigurationResponseSuccess @@ -209,6 +213,17 @@ module Aws ) -> _DeleteCustomPluginResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _DeleteCustomPluginResponseSuccess + interface _DeleteWorkerConfigurationResponseSuccess + include ::Seahorse::Client::_ResponseSuccess[Types::DeleteWorkerConfigurationResponse] + def worker_configuration_arn: () -> ::String + def worker_configuration_state: () -> ("ACTIVE" | "DELETING") + end + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/KafkaConnect/Client.html#delete_worker_configuration-instance_method + def delete_worker_configuration: ( + worker_configuration_arn: ::String + ) -> _DeleteWorkerConfigurationResponseSuccess + | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _DeleteWorkerConfigurationResponseSuccess + interface _DescribeConnectorResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::DescribeConnectorResponse] def capacity: () -> Types::CapacityDescription @@ -258,6 +273,7 @@ module Aws def latest_revision: () -> Types::WorkerConfigurationRevisionDescription def name: 
() -> ::String def worker_configuration_arn: () -> ::String + def worker_configuration_state: () -> ("ACTIVE" | "DELETING") end # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/KafkaConnect/Client.html#describe_worker_configuration-instance_method def describe_worker_configuration: ( @@ -286,10 +302,21 @@ module Aws # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/KafkaConnect/Client.html#list_custom_plugins-instance_method def list_custom_plugins: ( ?max_results: ::Integer, + ?name_prefix: ::String, ?next_token: ::String ) -> _ListCustomPluginsResponseSuccess | (?Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListCustomPluginsResponseSuccess + interface _ListTagsForResourceResponseSuccess + include ::Seahorse::Client::_ResponseSuccess[Types::ListTagsForResourceResponse] + def tags: () -> ::Hash[::String, ::String] + end + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/KafkaConnect/Client.html#list_tags_for_resource-instance_method + def list_tags_for_resource: ( + resource_arn: ::String + ) -> _ListTagsForResourceResponseSuccess + | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListTagsForResourceResponseSuccess + interface _ListWorkerConfigurationsResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::ListWorkerConfigurationsResponse] def next_token: () -> ::String @@ -298,10 +325,31 @@ module Aws # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/KafkaConnect/Client.html#list_worker_configurations-instance_method def list_worker_configurations: ( ?max_results: ::Integer, + ?name_prefix: ::String, ?next_token: ::String ) -> _ListWorkerConfigurationsResponseSuccess | (?Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListWorkerConfigurationsResponseSuccess + interface _TagResourceResponseSuccess + include ::Seahorse::Client::_ResponseSuccess[Types::TagResourceResponse] + end + # 
https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/KafkaConnect/Client.html#tag_resource-instance_method + def tag_resource: ( + resource_arn: ::String, + tags: Hash[::String, ::String] + ) -> _TagResourceResponseSuccess + | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _TagResourceResponseSuccess + + interface _UntagResourceResponseSuccess + include ::Seahorse::Client::_ResponseSuccess[Types::UntagResourceResponse] + end + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/KafkaConnect/Client.html#untag_resource-instance_method + def untag_resource: ( + resource_arn: ::String, + tag_keys: Array[::String] + ) -> _UntagResourceResponseSuccess + | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _UntagResourceResponseSuccess + interface _UpdateConnectorResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::UpdateConnectorResponse] def connector_arn: () -> ::String diff --git a/gems/aws-sdk-kafkaconnect/sig/types.rbs b/gems/aws-sdk-kafkaconnect/sig/types.rbs index 23a6b114a7b..c69b1b9df17 100644 --- a/gems/aws-sdk-kafkaconnect/sig/types.rbs +++ b/gems/aws-sdk-kafkaconnect/sig/types.rbs @@ -118,6 +118,7 @@ module Aws::KafkaConnect attr_accessor log_delivery: Types::LogDelivery attr_accessor plugins: ::Array[Types::Plugin] attr_accessor service_execution_role_arn: ::String + attr_accessor tags: ::Hash[::String, ::String] attr_accessor worker_configuration: Types::WorkerConfiguration SENSITIVE: [:connector_configuration] end @@ -134,6 +135,7 @@ module Aws::KafkaConnect attr_accessor description: ::String attr_accessor location: Types::CustomPluginLocation attr_accessor name: ::String + attr_accessor tags: ::Hash[::String, ::String] SENSITIVE: [] end @@ -149,6 +151,7 @@ module Aws::KafkaConnect attr_accessor description: ::String attr_accessor name: ::String attr_accessor properties_file_content: ::String + attr_accessor tags: ::Hash[::String, ::String] SENSITIVE: [:properties_file_content] end @@ -157,6 +160,7 @@ 
module Aws::KafkaConnect attr_accessor latest_revision: Types::WorkerConfigurationRevisionSummary attr_accessor name: ::String attr_accessor worker_configuration_arn: ::String + attr_accessor worker_configuration_state: ("ACTIVE" | "DELETING") SENSITIVE: [] end @@ -231,6 +235,17 @@ module Aws::KafkaConnect SENSITIVE: [] end + class DeleteWorkerConfigurationRequest + attr_accessor worker_configuration_arn: ::String + SENSITIVE: [] + end + + class DeleteWorkerConfigurationResponse + attr_accessor worker_configuration_arn: ::String + attr_accessor worker_configuration_state: ("ACTIVE" | "DELETING") + SENSITIVE: [] + end + class DescribeConnectorRequest attr_accessor connector_arn: ::String SENSITIVE: [] @@ -284,6 +299,7 @@ module Aws::KafkaConnect attr_accessor latest_revision: Types::WorkerConfigurationRevisionDescription attr_accessor name: ::String attr_accessor worker_configuration_arn: ::String + attr_accessor worker_configuration_state: ("ACTIVE" | "DELETING") SENSITIVE: [] end @@ -354,6 +370,7 @@ module Aws::KafkaConnect class ListCustomPluginsRequest attr_accessor max_results: ::Integer + attr_accessor name_prefix: ::String attr_accessor next_token: ::String SENSITIVE: [] end @@ -364,8 +381,19 @@ module Aws::KafkaConnect SENSITIVE: [] end + class ListTagsForResourceRequest + attr_accessor resource_arn: ::String + SENSITIVE: [] + end + + class ListTagsForResourceResponse + attr_accessor tags: ::Hash[::String, ::String] + SENSITIVE: [] + end + class ListWorkerConfigurationsRequest attr_accessor max_results: ::Integer + attr_accessor name_prefix: ::String attr_accessor next_token: ::String SENSITIVE: [] end @@ -488,6 +516,15 @@ module Aws::KafkaConnect SENSITIVE: [] end + class TagResourceRequest + attr_accessor resource_arn: ::String + attr_accessor tags: ::Hash[::String, ::String] + SENSITIVE: [] + end + + class TagResourceResponse < Aws::EmptyStructure + end + class TooManyRequestsException attr_accessor message: ::String SENSITIVE: [] @@ -498,6 +535,15 @@ 
module Aws::KafkaConnect SENSITIVE: [] end + class UntagResourceRequest + attr_accessor resource_arn: ::String + attr_accessor tag_keys: ::Array[::String] + SENSITIVE: [] + end + + class UntagResourceResponse < Aws::EmptyStructure + end + class UpdateConnectorRequest attr_accessor capacity: Types::CapacityUpdate attr_accessor connector_arn: ::String @@ -556,6 +602,7 @@ module Aws::KafkaConnect attr_accessor latest_revision: Types::WorkerConfigurationRevisionSummary attr_accessor name: ::String attr_accessor worker_configuration_arn: ::String + attr_accessor worker_configuration_state: ("ACTIVE" | "DELETING") SENSITIVE: [] end diff --git a/gems/aws-sdk-rds/CHANGELOG.md b/gems/aws-sdk-rds/CHANGELOG.md index 4cc47d52933..96d72494a9a 100644 --- a/gems/aws-sdk-rds/CHANGELOG.md +++ b/gems/aws-sdk-rds/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.219.0 (2024-02-26) +------------------ + +* Feature - This release adds support for gp3 data volumes for Multi-AZ DB Clusters. 
+ 1.218.0 (2024-02-23) ------------------ diff --git a/gems/aws-sdk-rds/VERSION b/gems/aws-sdk-rds/VERSION index aaf66f15301..ba5ffb38ed4 100644 --- a/gems/aws-sdk-rds/VERSION +++ b/gems/aws-sdk-rds/VERSION @@ -1 +1 @@ -1.218.0 +1.219.0 diff --git a/gems/aws-sdk-rds/lib/aws-sdk-rds.rb b/gems/aws-sdk-rds/lib/aws-sdk-rds.rb index 16c22f23b28..dc6e6cc99c7 100644 --- a/gems/aws-sdk-rds/lib/aws-sdk-rds.rb +++ b/gems/aws-sdk-rds/lib/aws-sdk-rds.rb @@ -78,6 +78,6 @@ # @!group service module Aws::RDS - GEM_VERSION = '1.218.0' + GEM_VERSION = '1.219.0' end diff --git a/gems/aws-sdk-rds/lib/aws-sdk-rds/client.rb b/gems/aws-sdk-rds/lib/aws-sdk-rds/client.rb index 32d36758724..36dfaabe62d 100644 --- a/gems/aws-sdk-rds/lib/aws-sdk-rds/client.rb +++ b/gems/aws-sdk-rds/lib/aws-sdk-rds/client.rb @@ -1417,6 +1417,7 @@ def copy_db_cluster_parameter_group(params = {}, options = {}) # resp.db_cluster_snapshot.db_system_id #=> String # resp.db_cluster_snapshot.storage_type #=> String # resp.db_cluster_snapshot.db_cluster_resource_id #=> String + # resp.db_cluster_snapshot.storage_throughput #=> Integer # # @see http://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/CopyDBClusterSnapshot AWS API Documentation # @@ -3664,6 +3665,7 @@ def create_custom_db_engine_version(params = {}, options = {}) # resp.db_cluster.aws_backup_recovery_point_arn #=> String # resp.db_cluster.limitless_database.status #=> String, one of "active", "not-in-use", "enabled", "disabled", "enabling", "disabling", "modifying-max-capacity", "error" # resp.db_cluster.limitless_database.min_required_acu #=> Float + # resp.db_cluster.storage_throughput #=> Integer # # @see http://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/CreateDBCluster AWS API Documentation # @@ -4090,6 +4092,7 @@ def create_db_cluster_parameter_group(params = {}, options = {}) # resp.db_cluster_snapshot.db_system_id #=> String # resp.db_cluster_snapshot.storage_type #=> String # resp.db_cluster_snapshot.db_cluster_resource_id #=> String + # 
resp.db_cluster_snapshot.storage_throughput #=> Integer # # @see http://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/CreateDBClusterSnapshot AWS API Documentation # @@ -8805,6 +8808,7 @@ def delete_custom_db_engine_version(params = {}, options = {}) # resp.db_cluster.aws_backup_recovery_point_arn #=> String # resp.db_cluster.limitless_database.status #=> String, one of "active", "not-in-use", "enabled", "disabled", "enabling", "disabling", "modifying-max-capacity", "error" # resp.db_cluster.limitless_database.min_required_acu #=> Float + # resp.db_cluster.storage_throughput #=> Integer # # @see http://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DeleteDBCluster AWS API Documentation # @@ -8861,6 +8865,7 @@ def delete_db_cluster(params = {}, options = {}) # resp.db_cluster_automated_backup.storage_type #=> String # resp.db_cluster_automated_backup.iops #=> Integer # resp.db_cluster_automated_backup.aws_backup_recovery_point_arn #=> String + # resp.db_cluster_automated_backup.storage_throughput #=> Integer # # @see http://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DeleteDBClusterAutomatedBackup AWS API Documentation # @@ -9106,6 +9111,7 @@ def delete_db_cluster_parameter_group(params = {}, options = {}) # resp.db_cluster_snapshot.db_system_id #=> String # resp.db_cluster_snapshot.storage_type #=> String # resp.db_cluster_snapshot.db_cluster_resource_id #=> String + # resp.db_cluster_snapshot.storage_throughput #=> Integer # # @see http://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DeleteDBClusterSnapshot AWS API Documentation # @@ -10972,6 +10978,7 @@ def describe_certificates(params = {}, options = {}) # resp.db_cluster_automated_backups[0].storage_type #=> String # resp.db_cluster_automated_backups[0].iops #=> Integer # resp.db_cluster_automated_backups[0].aws_backup_recovery_point_arn #=> String + # resp.db_cluster_automated_backups[0].storage_throughput #=> Integer # # @see 
http://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DescribeDBClusterAutomatedBackups AWS API Documentation # @@ -11876,6 +11883,7 @@ def describe_db_cluster_snapshot_attributes(params = {}, options = {}) # resp.db_cluster_snapshots[0].db_system_id #=> String # resp.db_cluster_snapshots[0].storage_type #=> String # resp.db_cluster_snapshots[0].db_cluster_resource_id #=> String + # resp.db_cluster_snapshots[0].storage_throughput #=> Integer # # # The following waiters are defined for this operation (see {Client#wait_until} for detailed usage): @@ -12220,6 +12228,7 @@ def describe_db_cluster_snapshots(params = {}, options = {}) # resp.db_clusters[0].aws_backup_recovery_point_arn #=> String # resp.db_clusters[0].limitless_database.status #=> String, one of "active", "not-in-use", "enabled", "disabled", "enabling", "disabling", "modifying-max-capacity", "error" # resp.db_clusters[0].limitless_database.min_required_acu #=> Float + # resp.db_clusters[0].storage_throughput #=> Integer # # # The following waiters are defined for this operation (see {Client#wait_until} for detailed usage): @@ -17488,6 +17497,7 @@ def enable_http_endpoint(params = {}, options = {}) # resp.db_cluster.aws_backup_recovery_point_arn #=> String # resp.db_cluster.limitless_database.status #=> String, one of "active", "not-in-use", "enabled", "disabled", "enabling", "disabling", "modifying-max-capacity", "error" # resp.db_cluster.limitless_database.min_required_acu #=> Float + # resp.db_cluster.storage_throughput #=> Integer # # @see http://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/FailoverDBCluster AWS API Documentation # @@ -19140,6 +19150,7 @@ def modify_custom_db_engine_version(params = {}, options = {}) # resp.db_cluster.aws_backup_recovery_point_arn #=> String # resp.db_cluster.limitless_database.status #=> String, one of "active", "not-in-use", "enabled", "disabled", "enabling", "disabling", "modifying-max-capacity", "error" # resp.db_cluster.limitless_database.min_required_acu #=> 
Float + # resp.db_cluster.storage_throughput #=> Integer # # @see http://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/ModifyDBCluster AWS API Documentation # @@ -22705,6 +22716,7 @@ def promote_read_replica(params = {}, options = {}) # resp.db_cluster.aws_backup_recovery_point_arn #=> String # resp.db_cluster.limitless_database.status #=> String, one of "active", "not-in-use", "enabled", "disabled", "enabling", "disabling", "modifying-max-capacity", "error" # resp.db_cluster.limitless_database.min_required_acu #=> Float + # resp.db_cluster.storage_throughput #=> Integer # # @see http://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/PromoteReadReplicaDBCluster AWS API Documentation # @@ -22995,6 +23007,7 @@ def purchase_reserved_db_instances_offering(params = {}, options = {}) # resp.db_cluster.aws_backup_recovery_point_arn #=> String # resp.db_cluster.limitless_database.status #=> String, one of "active", "not-in-use", "enabled", "disabled", "enabling", "disabling", "modifying-max-capacity", "error" # resp.db_cluster.limitless_database.min_required_acu #=> Float + # resp.db_cluster.storage_throughput #=> Integer # # @see http://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/RebootDBCluster AWS API Documentation # @@ -24518,6 +24531,7 @@ def reset_db_parameter_group(params = {}, options = {}) # resp.db_cluster.aws_backup_recovery_point_arn #=> String # resp.db_cluster.limitless_database.status #=> String, one of "active", "not-in-use", "enabled", "disabled", "enabling", "disabling", "modifying-max-capacity", "error" # resp.db_cluster.limitless_database.min_required_acu #=> Float + # resp.db_cluster.storage_throughput #=> Integer # # @see http://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/RestoreDBClusterFromS3 AWS API Documentation # @@ -25232,6 +25246,7 @@ def restore_db_cluster_from_s3(params = {}, options = {}) # resp.db_cluster.aws_backup_recovery_point_arn #=> String # resp.db_cluster.limitless_database.status #=> String, one of "active", "not-in-use", 
"enabled", "disabled", "enabling", "disabling", "modifying-max-capacity", "error" # resp.db_cluster.limitless_database.min_required_acu #=> Float + # resp.db_cluster.storage_throughput #=> Integer # # @see http://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/RestoreDBClusterFromSnapshot AWS API Documentation # @@ -25921,6 +25936,7 @@ def restore_db_cluster_from_snapshot(params = {}, options = {}) # resp.db_cluster.aws_backup_recovery_point_arn #=> String # resp.db_cluster.limitless_database.status #=> String, one of "active", "not-in-use", "enabled", "disabled", "enabling", "disabling", "modifying-max-capacity", "error" # resp.db_cluster.limitless_database.min_required_acu #=> Float + # resp.db_cluster.storage_throughput #=> Integer # # @see http://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/RestoreDBClusterToPointInTime AWS API Documentation # @@ -26420,8 +26436,7 @@ def restore_db_cluster_to_point_in_time(params = {}, options = {}) # This setting doesn't apply to RDS Custom or Amazon Aurora. # # @option params [String] :db_cluster_snapshot_identifier - # The identifier for the RDS for MySQL Multi-AZ DB cluster snapshot to - # restore from. + # The identifier for the Multi-AZ DB cluster snapshot to restore from. # # For more information on Multi-AZ DB clusters, see [ Multi-AZ DB # cluster deployments][1] in the *Amazon RDS User Guide*. @@ -26441,9 +26456,6 @@ def restore_db_cluster_to_point_in_time(params = {}, options = {}) # # * Can't be the identifier of an Aurora DB cluster snapshot. # - # * Can't be the identifier of an RDS for PostgreSQL Multi-AZ DB - # cluster snapshot. 
- # # # # [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html @@ -28731,6 +28743,7 @@ def start_activity_stream(params = {}, options = {}) # resp.db_cluster.aws_backup_recovery_point_arn #=> String # resp.db_cluster.limitless_database.status #=> String, one of "active", "not-in-use", "enabled", "disabled", "enabling", "disabling", "modifying-max-capacity", "error" # resp.db_cluster.limitless_database.min_required_acu #=> Float + # resp.db_cluster.storage_throughput #=> Integer # # @see http://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/StartDBCluster AWS API Documentation # @@ -29567,6 +29580,7 @@ def stop_activity_stream(params = {}, options = {}) # resp.db_cluster.aws_backup_recovery_point_arn #=> String # resp.db_cluster.limitless_database.status #=> String, one of "active", "not-in-use", "enabled", "disabled", "enabling", "disabling", "modifying-max-capacity", "error" # resp.db_cluster.limitless_database.min_required_acu #=> Float + # resp.db_cluster.storage_throughput #=> Integer # # @see http://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/StopDBCluster AWS API Documentation # @@ -30414,7 +30428,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-rds' - context[:gem_version] = '1.218.0' + context[:gem_version] = '1.219.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-rds/lib/aws-sdk-rds/client_api.rb b/gems/aws-sdk-rds/lib/aws-sdk-rds/client_api.rb index 759f2b34067..c8135d4e7d5 100644 --- a/gems/aws-sdk-rds/lib/aws-sdk-rds/client_api.rb +++ b/gems/aws-sdk-rds/lib/aws-sdk-rds/client_api.rb @@ -1422,6 +1422,7 @@ module ClientApi DBCluster.add_member(:local_write_forwarding_status, Shapes::ShapeRef.new(shape: LocalWriteForwardingStatus, location_name: "LocalWriteForwardingStatus")) DBCluster.add_member(:aws_backup_recovery_point_arn, Shapes::ShapeRef.new(shape: String, location_name: "AwsBackupRecoveryPointArn")) 
DBCluster.add_member(:limitless_database, Shapes::ShapeRef.new(shape: LimitlessDatabase, location_name: "LimitlessDatabase")) + DBCluster.add_member(:storage_throughput, Shapes::ShapeRef.new(shape: IntegerOptional, location_name: "StorageThroughput")) DBCluster.struct_class = Types::DBCluster DBClusterAlreadyExistsFault.struct_class = Types::DBClusterAlreadyExistsFault @@ -1450,6 +1451,7 @@ module ClientApi DBClusterAutomatedBackup.add_member(:storage_type, Shapes::ShapeRef.new(shape: String, location_name: "StorageType")) DBClusterAutomatedBackup.add_member(:iops, Shapes::ShapeRef.new(shape: IntegerOptional, location_name: "Iops")) DBClusterAutomatedBackup.add_member(:aws_backup_recovery_point_arn, Shapes::ShapeRef.new(shape: String, location_name: "AwsBackupRecoveryPointArn")) + DBClusterAutomatedBackup.add_member(:storage_throughput, Shapes::ShapeRef.new(shape: IntegerOptional, location_name: "StorageThroughput")) DBClusterAutomatedBackup.struct_class = Types::DBClusterAutomatedBackup DBClusterAutomatedBackupList.member = Shapes::ShapeRef.new(shape: DBClusterAutomatedBackup, location_name: "DBClusterAutomatedBackup") @@ -1592,6 +1594,7 @@ module ClientApi DBClusterSnapshot.add_member(:db_system_id, Shapes::ShapeRef.new(shape: String, location_name: "DBSystemId")) DBClusterSnapshot.add_member(:storage_type, Shapes::ShapeRef.new(shape: String, location_name: "StorageType")) DBClusterSnapshot.add_member(:db_cluster_resource_id, Shapes::ShapeRef.new(shape: String, location_name: "DbClusterResourceId")) + DBClusterSnapshot.add_member(:storage_throughput, Shapes::ShapeRef.new(shape: IntegerOptional, location_name: "StorageThroughput")) DBClusterSnapshot.struct_class = Types::DBClusterSnapshot DBClusterSnapshotAlreadyExistsFault.struct_class = Types::DBClusterSnapshotAlreadyExistsFault diff --git a/gems/aws-sdk-rds/lib/aws-sdk-rds/db_cluster.rb b/gems/aws-sdk-rds/lib/aws-sdk-rds/db_cluster.rb index b046799bf63..212f8a0a8b6 100644 --- 
a/gems/aws-sdk-rds/lib/aws-sdk-rds/db_cluster.rb +++ b/gems/aws-sdk-rds/lib/aws-sdk-rds/db_cluster.rb @@ -686,6 +686,16 @@ def limitless_database data[:limitless_database] end + # The storage throughput for the DB cluster. The throughput is + # automatically set based on the IOPS that you provision, and is not + # configurable. + # + # This setting is only for non-Aurora Multi-AZ DB clusters. + # @return [Integer] + def storage_throughput + data[:storage_throughput] + end + # @!endgroup # @return [Client] diff --git a/gems/aws-sdk-rds/lib/aws-sdk-rds/db_cluster_snapshot.rb b/gems/aws-sdk-rds/lib/aws-sdk-rds/db_cluster_snapshot.rb index 0d3c4c0eaaa..64cc41fd6b7 100644 --- a/gems/aws-sdk-rds/lib/aws-sdk-rds/db_cluster_snapshot.rb +++ b/gems/aws-sdk-rds/lib/aws-sdk-rds/db_cluster_snapshot.rb @@ -209,6 +209,16 @@ def db_cluster_resource_id data[:db_cluster_resource_id] end + # The storage throughput for the DB cluster snapshot. The throughput is + # automatically set based on the IOPS that you provision, and is not + # configurable. + # + # This setting is only for non-Aurora Multi-AZ DB clusters. + # @return [Integer] + def storage_throughput + data[:storage_throughput] + end + # @!endgroup # @return [Client] diff --git a/gems/aws-sdk-rds/lib/aws-sdk-rds/db_snapshot.rb b/gems/aws-sdk-rds/lib/aws-sdk-rds/db_snapshot.rb index ba5871e2962..b63825a0da7 100644 --- a/gems/aws-sdk-rds/lib/aws-sdk-rds/db_snapshot.rb +++ b/gems/aws-sdk-rds/lib/aws-sdk-rds/db_snapshot.rb @@ -1119,8 +1119,7 @@ def delete(options = {}) # # This setting doesn't apply to RDS Custom or Amazon Aurora. # @option options [String] :db_cluster_snapshot_identifier - # The identifier for the RDS for MySQL Multi-AZ DB cluster snapshot to - # restore from. + # The identifier for the Multi-AZ DB cluster snapshot to restore from. # # For more information on Multi-AZ DB clusters, see [ Multi-AZ DB # cluster deployments][1] in the *Amazon RDS User Guide*. 
@@ -1140,9 +1139,6 @@ def delete(options = {}) # # * Can't be the identifier of an Aurora DB cluster snapshot. # - # * Can't be the identifier of an RDS for PostgreSQL Multi-AZ DB - # cluster snapshot. - # # # # [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html diff --git a/gems/aws-sdk-rds/lib/aws-sdk-rds/types.rb b/gems/aws-sdk-rds/lib/aws-sdk-rds/types.rb index 2ca1ef28765..de25615dc46 100644 --- a/gems/aws-sdk-rds/lib/aws-sdk-rds/types.rb +++ b/gems/aws-sdk-rds/lib/aws-sdk-rds/types.rb @@ -6869,6 +6869,14 @@ class CustomDBEngineVersionQuotaExceededFault < Aws::EmptyStructure; end # The details for Aurora Limitless Database. # @return [Types::LimitlessDatabase] # + # @!attribute [rw] storage_throughput + # The storage throughput for the DB cluster. The throughput is + # automatically set based on the IOPS that you provision, and is not + # configurable. + # + # This setting is only for non-Aurora Multi-AZ DB clusters. + # @return [Integer] + # # @see http://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DBCluster AWS API Documentation # class DBCluster < Struct.new( @@ -6948,7 +6956,8 @@ class DBCluster < Struct.new( :io_optimized_next_allowed_modification_time, :local_write_forwarding_status, :aws_backup_recovery_point_arn, - :limitless_database) + :limitless_database, + :storage_throughput) SENSITIVE = [] include Aws::Structure end @@ -7089,6 +7098,14 @@ class DBClusterAlreadyExistsFault < Aws::EmptyStructure; end # Services Backup. # @return [String] # + # @!attribute [rw] storage_throughput + # The storage throughput for the automated backup. The throughput is + # automatically set based on the IOPS that you provision, and is not + # configurable. + # + # This setting is only for non-Aurora Multi-AZ DB clusters. 
+ # @return [Integer] + # # @see http://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DBClusterAutomatedBackup AWS API Documentation # class DBClusterAutomatedBackup < Struct.new( @@ -7115,7 +7132,8 @@ class DBClusterAutomatedBackup < Struct.new( :kms_key_id, :storage_type, :iops, - :aws_backup_recovery_point_arn) + :aws_backup_recovery_point_arn, + :storage_throughput) SENSITIVE = [] include Aws::Structure end @@ -7786,6 +7804,14 @@ class DBClusterRoleQuotaExceededFault < Aws::EmptyStructure; end # created from. # @return [String] # + # @!attribute [rw] storage_throughput + # The storage throughput for the DB cluster snapshot. The throughput + # is automatically set based on the IOPS that you provision, and is + # not configurable. + # + # This setting is only for non-Aurora Multi-AZ DB clusters. + # @return [Integer] + # # @see http://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DBClusterSnapshot AWS API Documentation # class DBClusterSnapshot < Struct.new( @@ -7813,7 +7839,8 @@ class DBClusterSnapshot < Struct.new( :tag_list, :db_system_id, :storage_type, - :db_cluster_resource_id) + :db_cluster_resource_id, + :storage_throughput) SENSITIVE = [] include Aws::Structure end @@ -23872,8 +23899,7 @@ class RestoreDBClusterToPointInTimeResult < Struct.new( # @return [Integer] # # @!attribute [rw] db_cluster_snapshot_identifier - # The identifier for the RDS for MySQL Multi-AZ DB cluster snapshot to - # restore from. + # The identifier for the Multi-AZ DB cluster snapshot to restore from. # # For more information on Multi-AZ DB clusters, see [ Multi-AZ DB # cluster deployments][1] in the *Amazon RDS User Guide*. @@ -23893,9 +23919,6 @@ class RestoreDBClusterToPointInTimeResult < Struct.new( # # * Can't be the identifier of an Aurora DB cluster snapshot. # - # * Can't be the identifier of an RDS for PostgreSQL Multi-AZ DB - # cluster snapshot. 
- # # # # [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html diff --git a/gems/aws-sdk-rds/sig/db_cluster.rbs b/gems/aws-sdk-rds/sig/db_cluster.rbs index 071d28bd9e1..aed423d82c2 100644 --- a/gems/aws-sdk-rds/sig/db_cluster.rbs +++ b/gems/aws-sdk-rds/sig/db_cluster.rbs @@ -246,6 +246,9 @@ module Aws # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/RDS/DBCluster.html#limitless_database-instance_method def limitless_database: () -> Types::LimitlessDatabase + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/RDS/DBCluster.html#storage_throughput-instance_method + def storage_throughput: () -> ::Integer + def client: () -> Client # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/RDS/DBCluster.html#load-instance_method diff --git a/gems/aws-sdk-rds/sig/db_cluster_snapshot.rbs b/gems/aws-sdk-rds/sig/db_cluster_snapshot.rbs index 7923fdbb177..3b413e99b62 100644 --- a/gems/aws-sdk-rds/sig/db_cluster_snapshot.rbs +++ b/gems/aws-sdk-rds/sig/db_cluster_snapshot.rbs @@ -91,6 +91,9 @@ module Aws # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/RDS/DBClusterSnapshot.html#db_cluster_resource_id-instance_method def db_cluster_resource_id: () -> ::String + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/RDS/DBClusterSnapshot.html#storage_throughput-instance_method + def storage_throughput: () -> ::Integer + def client: () -> Client # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/RDS/DBClusterSnapshot.html#load-instance_method diff --git a/gems/aws-sdk-rds/sig/types.rbs b/gems/aws-sdk-rds/sig/types.rbs index 0339c7decb0..37b7d0cf6ed 100644 --- a/gems/aws-sdk-rds/sig/types.rbs +++ b/gems/aws-sdk-rds/sig/types.rbs @@ -811,6 +811,7 @@ module Aws::RDS attr_accessor local_write_forwarding_status: ("enabled" | "disabled" | "enabling" | "disabling" | "requested") attr_accessor aws_backup_recovery_point_arn: ::String attr_accessor limitless_database: Types::LimitlessDatabase + attr_accessor storage_throughput: 
::Integer SENSITIVE: [] end @@ -842,6 +843,7 @@ module Aws::RDS attr_accessor storage_type: ::String attr_accessor iops: ::Integer attr_accessor aws_backup_recovery_point_arn: ::String + attr_accessor storage_throughput: ::Integer SENSITIVE: [] end @@ -1010,6 +1012,7 @@ module Aws::RDS attr_accessor db_system_id: ::String attr_accessor storage_type: ::String attr_accessor db_cluster_resource_id: ::String + attr_accessor storage_throughput: ::Integer SENSITIVE: [] end From caa6fbadd1a75ee7dd5a731893fdf429f45d3c29 Mon Sep 17 00:00:00 2001 From: Alex Woods Date: Mon, 26 Feb 2024 13:39:50 -0800 Subject: [PATCH 7/8] Add frames.erb --- .../templates/default/fulldoc/html/frames.erb | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 doc-src/templates/default/fulldoc/html/frames.erb diff --git a/doc-src/templates/default/fulldoc/html/frames.erb b/doc-src/templates/default/fulldoc/html/frames.erb new file mode 100644 index 00000000000..a4ce967778a --- /dev/null +++ b/doc-src/templates/default/fulldoc/html/frames.erb @@ -0,0 +1,20 @@ + + + + + <%= options.title %> + + + + From b126c41bcc5aeb8a551492a4458c41dda51a0b23 Mon Sep 17 00:00:00 2001 From: AWS SDK For Ruby Date: Tue, 27 Feb 2024 19:11:04 +0000 Subject: [PATCH 8/8] Updated API models and rebuilt service gems. 
--- apis/amplifyuibuilder/2021-08-11/api-2.json | 141 ++++++++++++++++-- apis/amplifyuibuilder/2021-08-11/docs-2.json | 55 +++++-- .../2021-08-11/endpoint-rule-set-1.json | 40 ++--- gems/aws-sdk-amplifyuibuilder/CHANGELOG.md | 5 + gems/aws-sdk-amplifyuibuilder/VERSION | 2 +- .../lib/aws-sdk-amplifyuibuilder.rb | 2 +- .../lib/aws-sdk-amplifyuibuilder/client.rb | 86 ++++++++++- .../aws-sdk-amplifyuibuilder/client_api.rb | 76 +++++++++- .../endpoint_provider.rb | 2 +- .../lib/aws-sdk-amplifyuibuilder/endpoints.rb | 42 ++++++ .../plugins/endpoints.rb | 6 + .../lib/aws-sdk-amplifyuibuilder/types.rb | 68 +++++++++ gems/aws-sdk-amplifyuibuilder/sig/client.rbs | 30 ++++ gems/aws-sdk-amplifyuibuilder/sig/types.rbs | 28 ++++ 14 files changed, 529 insertions(+), 54 deletions(-) diff --git a/apis/amplifyuibuilder/2021-08-11/api-2.json b/apis/amplifyuibuilder/2021-08-11/api-2.json index 99cacc4272f..e890602a2be 100644 --- a/apis/amplifyuibuilder/2021-08-11/api-2.json +++ b/apis/amplifyuibuilder/2021-08-11/api-2.json @@ -281,6 +281,23 @@ {"shape":"InvalidParameterException"} ] }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ] + }, "ListThemes":{ "name":"ListThemes", "http":{ @@ -306,7 +323,8 @@ "errors":[ {"shape":"UnauthorizedException"}, {"shape":"InvalidParameterException"} - ] + ], + "idempotent":true }, "RefreshToken":{ "name":"RefreshToken", @@ -336,6 +354,42 @@ {"shape":"ThrottlingException"} ] }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + 
"input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "idempotent":true + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"InternalServerException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "idempotent":true + }, "UpdateComponent":{ "name":"UpdateComponent", "http":{ @@ -1791,11 +1845,6 @@ "nextToken":{"shape":"String"} } }, - "ListComponentsLimit":{ - "type":"integer", - "max":100, - "min":1 - }, "ListComponentsRequest":{ "type":"structure", "required":[ @@ -1819,7 +1868,7 @@ "locationName":"nextToken" }, "maxResults":{ - "shape":"ListComponentsLimit", + "shape":"ListEntityLimit", "location":"querystring", "locationName":"maxResults" } @@ -1833,8 +1882,9 @@ "nextToken":{"shape":"String"} } }, - "ListFormsLimit":{ + "ListEntityLimit":{ "type":"integer", + "box":true, "max":100, "min":1 }, @@ -1861,7 +1911,7 @@ "locationName":"nextToken" }, "maxResults":{ - "shape":"ListFormsLimit", + "shape":"ListEntityLimit", "location":"querystring", "locationName":"maxResults" } @@ -1875,10 +1925,23 @@ "nextToken":{"shape":"String"} } }, - "ListThemesLimit":{ - "type":"integer", - "max":100, - "min":1 + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"String", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "required":["tags"], + "members":{ + "tags":{"shape":"Tags"} + 
} }, "ListThemesRequest":{ "type":"structure", @@ -1903,7 +1966,7 @@ "locationName":"nextToken" }, "maxResults":{ - "shape":"ListThemesLimit", + "shape":"ListEntityLimit", "location":"querystring", "locationName":"maxResults" } @@ -2194,6 +2257,32 @@ "min":1, "pattern":"(?!aws:)[a-zA-Z+-=._:/]+" }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"String", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{"shape":"Tags"} + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "TagValue":{ "type":"string", "max":256, @@ -2298,6 +2387,30 @@ }, "exception":true }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"String", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateComponentData":{ "type":"structure", "members":{ diff --git a/apis/amplifyuibuilder/2021-08-11/docs-2.json b/apis/amplifyuibuilder/2021-08-11/docs-2.json index b0105a38f8a..0bef1751a72 100644 --- a/apis/amplifyuibuilder/2021-08-11/docs-2.json +++ b/apis/amplifyuibuilder/2021-08-11/docs-2.json @@ -20,10 +20,13 @@ "ListCodegenJobs": "

Retrieves a list of code generation jobs for a specified Amplify app and backend environment.

", "ListComponents": "

Retrieves a list of components for a specified Amplify app and backend environment.

", "ListForms": "

Retrieves a list of forms for a specified Amplify app and backend environment.

", + "ListTagsForResource": "

Returns a list of tags for a specified Amazon Resource Name (ARN).

", "ListThemes": "

Retrieves a list of themes for a specified Amplify app and backend environment.

", "PutMetadataFlag": "

Stores the metadata information about a feature on a form.

", "RefreshToken": "

This is for internal use.

Amplify uses this action to refresh a previously issued access token that might have expired.

", "StartCodegenJob": "

Starts a code generation job for a specified Amplify app and backend environment.

", + "TagResource": "

Tags the resource with a tag key and value.

", + "UntagResource": "

Untags a resource with a specified Amazon Resource Name (ARN).

", "UpdateComponent": "

Updates an existing component.

", "UpdateForm": "

Updates an existing form.

", "UpdateTheme": "

Updates an existing theme.

" @@ -877,12 +880,6 @@ "refs": { } }, - "ListComponentsLimit": { - "base": null, - "refs": { - "ListComponentsRequest$maxResults": "

The maximum number of components to retrieve.

" - } - }, "ListComponentsRequest": { "base": null, "refs": { @@ -893,10 +890,12 @@ "refs": { } }, - "ListFormsLimit": { + "ListEntityLimit": { "base": null, "refs": { - "ListFormsRequest$maxResults": "

The maximum number of forms to retrieve.

" + "ListComponentsRequest$maxResults": "

The maximum number of components to retrieve.

", + "ListFormsRequest$maxResults": "

The maximum number of forms to retrieve.

", + "ListThemesRequest$maxResults": "

The maximum number of theme results to return in the response.

" } }, "ListFormsRequest": { @@ -909,10 +908,14 @@ "refs": { } }, - "ListThemesLimit": { + "ListTagsForResourceRequest": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceResponse": { "base": null, "refs": { - "ListThemesRequest$maxResults": "

The maximum number of theme results to return in the response.

" } }, "ListThemesRequest": { @@ -1259,6 +1262,7 @@ "ListFormsRequest$environmentName": "

The name of the backend environment that is a part of the Amplify app.

", "ListFormsRequest$nextToken": "

The token to request the next page of results.

", "ListFormsResponse$nextToken": "

The pagination token that's included if more results are available.

", + "ListTagsForResourceRequest$resourceArn": "

The Amazon Resource Name (ARN) to use to list tags.

", "ListThemesRequest$appId": "

The unique ID for the Amplify app.

", "ListThemesRequest$environmentName": "

The name of the backend environment that is a part of the Amplify app.

", "ListThemesRequest$nextToken": "

The token to request the next page of results.

", @@ -1286,6 +1290,7 @@ "StartCodegenJobRequest$environmentName": "

The name of the backend environment that is a part of the Amplify app.

", "StartCodegenJobRequest$clientToken": "

The idempotency token used to ensure that the code generation job request completes only once.

", "StrValues$member": null, + "TagResourceRequest$resourceArn": "

The Amazon Resource Name (ARN) to use to tag a resource.

", "Theme$appId": "

The unique ID for the Amplify app associated with the theme.

", "Theme$environmentName": "

The name of the backend environment that is a part of the Amplify app.

", "ThemeSummary$appId": "

The unique ID for the app associated with the theme summary.

", @@ -1294,6 +1299,7 @@ "ThemeValues$key": "

The name of the property.

", "ThrottlingException$message": null, "UnauthorizedException$message": null, + "UntagResourceRequest$resourceArn": "

The Amazon Resource Name (ARN) to use to untag a resource.

", "UpdateComponentData$sourceId": "

The unique ID of the component in its original source system, such as Figma.

", "UpdateComponentData$schemaVersion": "

The schema version of the component when it was imported.

", "UpdateComponentRequest$appId": "

The unique ID for the Amplify app.

", @@ -1324,9 +1330,26 @@ "TagKey": { "base": null, "refs": { + "TagKeyList$member": null, "Tags$key": null } }, + "TagKeyList": { + "base": null, + "refs": { + "UntagResourceRequest$tagKeys": "

The tag keys to use to untag a resource.

" + } + }, + "TagResourceRequest": { + "base": null, + "refs": { + } + }, + "TagResourceResponse": { + "base": null, + "refs": { + } + }, "TagValue": { "base": null, "refs": { @@ -1342,7 +1365,9 @@ "CreateFormData$tags": "

One or more key-value pairs to use when tagging the form data.

", "CreateThemeData$tags": "

One or more key-value pairs to use when tagging the theme data.

", "Form$tags": "

One or more key-value pairs to use when tagging the form.

", + "ListTagsForResourceResponse$tags": "

A list of tag key value pairs for a specified Amazon Resource Name (ARN).

", "StartCodegenJobData$tags": "

One or more key-value pairs to use when tagging the code generation job data.

", + "TagResourceRequest$tags": "

A list of tag key value pairs for a specified Amazon Resource Name (ARN).

", "Theme$tags": "

One or more key-value pairs to use when tagging the theme.

" } }, @@ -1423,6 +1448,16 @@ "refs": { } }, + "UntagResourceRequest": { + "base": null, + "refs": { + } + }, + "UntagResourceResponse": { + "base": null, + "refs": { + } + }, "UpdateComponentData": { "base": "

Updates and saves all of the information about a component, based on component ID.

", "refs": { diff --git a/apis/amplifyuibuilder/2021-08-11/endpoint-rule-set-1.json b/apis/amplifyuibuilder/2021-08-11/endpoint-rule-set-1.json index 24b1f786c62..311de363a48 100644 --- a/apis/amplifyuibuilder/2021-08-11/endpoint-rule-set-1.json +++ b/apis/amplifyuibuilder/2021-08-11/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } 
- ] + ], + "type": "tree" }, { "conditions": [], diff --git a/gems/aws-sdk-amplifyuibuilder/CHANGELOG.md b/gems/aws-sdk-amplifyuibuilder/CHANGELOG.md index 3413fca6ab0..1d9052cf34a 100644 --- a/gems/aws-sdk-amplifyuibuilder/CHANGELOG.md +++ b/gems/aws-sdk-amplifyuibuilder/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.24.0 (2024-02-27) +------------------ + +* Feature - We have added the ability to tag resources after they are created + 1.23.0 (2024-01-26) ------------------ diff --git a/gems/aws-sdk-amplifyuibuilder/VERSION b/gems/aws-sdk-amplifyuibuilder/VERSION index a6c2798a482..53cc1a6f929 100644 --- a/gems/aws-sdk-amplifyuibuilder/VERSION +++ b/gems/aws-sdk-amplifyuibuilder/VERSION @@ -1 +1 @@ -1.23.0 +1.24.0 diff --git a/gems/aws-sdk-amplifyuibuilder/lib/aws-sdk-amplifyuibuilder.rb b/gems/aws-sdk-amplifyuibuilder/lib/aws-sdk-amplifyuibuilder.rb index 475ea40a341..c9852592140 100644 --- a/gems/aws-sdk-amplifyuibuilder/lib/aws-sdk-amplifyuibuilder.rb +++ b/gems/aws-sdk-amplifyuibuilder/lib/aws-sdk-amplifyuibuilder.rb @@ -53,6 +53,6 @@ # @!group service module Aws::AmplifyUIBuilder - GEM_VERSION = '1.23.0' + GEM_VERSION = '1.24.0' end diff --git a/gems/aws-sdk-amplifyuibuilder/lib/aws-sdk-amplifyuibuilder/client.rb b/gems/aws-sdk-amplifyuibuilder/lib/aws-sdk-amplifyuibuilder/client.rb index 6852df5ebcb..101f0b19307 100644 --- a/gems/aws-sdk-amplifyuibuilder/lib/aws-sdk-amplifyuibuilder/client.rb +++ b/gems/aws-sdk-amplifyuibuilder/lib/aws-sdk-amplifyuibuilder/client.rb @@ -4210,6 +4210,35 @@ def list_forms(params = {}, options = {}) req.send_request(options) end + # Returns a list of tags for a specified Amazon Resource Name (ARN). + # + # @option params [required, String] :resource_arn + # The Amazon Resource Name (ARN) to use to list tags. 
+ # + # @return [Types::ListTagsForResourceResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::ListTagsForResourceResponse#tags #tags} => Hash<String,String> + # + # @example Request syntax with placeholder values + # + # resp = client.list_tags_for_resource({ + # resource_arn: "String", # required + # }) + # + # @example Response structure + # + # resp.tags #=> Hash + # resp.tags["TagKey"] #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/amplifyuibuilder-2021-08-11/ListTagsForResource AWS API Documentation + # + # @overload list_tags_for_resource(params = {}) + # @param [Hash] params ({}) + def list_tags_for_resource(params = {}, options = {}) + req = build_request(:list_tags_for_resource, params) + req.send_request(options) + end + # Retrieves a list of themes for a specified Amplify app and backend # environment. # @@ -4547,6 +4576,61 @@ def start_codegen_job(params = {}, options = {}) req.send_request(options) end + # Tags the resource with a tag key and value. + # + # @option params [required, String] :resource_arn + # The Amazon Resource Name (ARN) to use to tag a resource. + # + # @option params [required, Hash] :tags + # A list of tag key value pairs for a specified Amazon Resource Name + # (ARN). + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # @example Request syntax with placeholder values + # + # resp = client.tag_resource({ + # resource_arn: "String", # required + # tags: { # required + # "TagKey" => "TagValue", + # }, + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/amplifyuibuilder-2021-08-11/TagResource AWS API Documentation + # + # @overload tag_resource(params = {}) + # @param [Hash] params ({}) + def tag_resource(params = {}, options = {}) + req = build_request(:tag_resource, params) + req.send_request(options) + end + + # Untags a resource with a specified Amazon Resource Name (ARN). 
+ # + # @option params [required, String] :resource_arn + # The Amazon Resource Name (ARN) to use to untag a resource. + # + # @option params [required, Array] :tag_keys + # The tag keys to use to untag a resource. + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # @example Request syntax with placeholder values + # + # resp = client.untag_resource({ + # resource_arn: "String", # required + # tag_keys: ["TagKey"], # required + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/amplifyuibuilder-2021-08-11/UntagResource AWS API Documentation + # + # @overload untag_resource(params = {}) + # @param [Hash] params ({}) + def untag_resource(params = {}, options = {}) + req = build_request(:untag_resource, params) + req.send_request(options) + end + # Updates an existing component. # # @option params [required, String] :app_id @@ -6434,7 +6518,7 @@ def build_request(operation_name, params = {}) params: params, config: config) context[:gem_name] = 'aws-sdk-amplifyuibuilder' - context[:gem_version] = '1.23.0' + context[:gem_version] = '1.24.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-amplifyuibuilder/lib/aws-sdk-amplifyuibuilder/client_api.rb b/gems/aws-sdk-amplifyuibuilder/lib/aws-sdk-amplifyuibuilder/client_api.rb index 2477650a452..74c1066911b 100644 --- a/gems/aws-sdk-amplifyuibuilder/lib/aws-sdk-amplifyuibuilder/client_api.rb +++ b/gems/aws-sdk-amplifyuibuilder/lib/aws-sdk-amplifyuibuilder/client_api.rb @@ -142,13 +142,13 @@ module ClientApi ListCodegenJobsLimit = Shapes::IntegerShape.new(name: 'ListCodegenJobsLimit') ListCodegenJobsRequest = Shapes::StructureShape.new(name: 'ListCodegenJobsRequest') ListCodegenJobsResponse = Shapes::StructureShape.new(name: 'ListCodegenJobsResponse') - ListComponentsLimit = Shapes::IntegerShape.new(name: 'ListComponentsLimit') ListComponentsRequest = Shapes::StructureShape.new(name: 'ListComponentsRequest') ListComponentsResponse = 
Shapes::StructureShape.new(name: 'ListComponentsResponse') - ListFormsLimit = Shapes::IntegerShape.new(name: 'ListFormsLimit') + ListEntityLimit = Shapes::IntegerShape.new(name: 'ListEntityLimit') ListFormsRequest = Shapes::StructureShape.new(name: 'ListFormsRequest') ListFormsResponse = Shapes::StructureShape.new(name: 'ListFormsResponse') - ListThemesLimit = Shapes::IntegerShape.new(name: 'ListThemesLimit') + ListTagsForResourceRequest = Shapes::StructureShape.new(name: 'ListTagsForResourceRequest') + ListTagsForResourceResponse = Shapes::StructureShape.new(name: 'ListTagsForResourceResponse') ListThemesRequest = Shapes::StructureShape.new(name: 'ListThemesRequest') ListThemesResponse = Shapes::StructureShape.new(name: 'ListThemesResponse') MutationActionSetStateParameter = Shapes::StructureShape.new(name: 'MutationActionSetStateParameter') @@ -182,6 +182,9 @@ module ClientApi String = Shapes::StringShape.new(name: 'String') SyntheticTimestamp_date_time = Shapes::TimestampShape.new(name: 'SyntheticTimestamp_date_time', timestampFormat: "iso8601") TagKey = Shapes::StringShape.new(name: 'TagKey') + TagKeyList = Shapes::ListShape.new(name: 'TagKeyList') + TagResourceRequest = Shapes::StructureShape.new(name: 'TagResourceRequest') + TagResourceResponse = Shapes::StructureShape.new(name: 'TagResourceResponse') TagValue = Shapes::StringShape.new(name: 'TagValue') Tags = Shapes::MapShape.new(name: 'Tags') Theme = Shapes::StructureShape.new(name: 'Theme') @@ -195,6 +198,8 @@ module ClientApi ThrottlingException = Shapes::StructureShape.new(name: 'ThrottlingException') TokenProviders = Shapes::StringShape.new(name: 'TokenProviders') UnauthorizedException = Shapes::StructureShape.new(name: 'UnauthorizedException') + UntagResourceRequest = Shapes::StructureShape.new(name: 'UntagResourceRequest') + UntagResourceResponse = Shapes::StructureShape.new(name: 'UntagResourceResponse') UpdateComponentData = Shapes::StructureShape.new(name: 'UpdateComponentData') 
UpdateComponentRequest = Shapes::StructureShape.new(name: 'UpdateComponentRequest') UpdateComponentResponse = Shapes::StructureShape.new(name: 'UpdateComponentResponse') @@ -803,7 +808,7 @@ module ClientApi ListComponentsRequest.add_member(:app_id, Shapes::ShapeRef.new(shape: String, required: true, location: "uri", location_name: "appId")) ListComponentsRequest.add_member(:environment_name, Shapes::ShapeRef.new(shape: String, required: true, location: "uri", location_name: "environmentName")) ListComponentsRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: String, location: "querystring", location_name: "nextToken")) - ListComponentsRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: ListComponentsLimit, location: "querystring", location_name: "maxResults")) + ListComponentsRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: ListEntityLimit, location: "querystring", location_name: "maxResults")) ListComponentsRequest.struct_class = Types::ListComponentsRequest ListComponentsResponse.add_member(:entities, Shapes::ShapeRef.new(shape: ComponentSummaryList, required: true, location_name: "entities")) @@ -813,17 +818,23 @@ module ClientApi ListFormsRequest.add_member(:app_id, Shapes::ShapeRef.new(shape: String, required: true, location: "uri", location_name: "appId")) ListFormsRequest.add_member(:environment_name, Shapes::ShapeRef.new(shape: String, required: true, location: "uri", location_name: "environmentName")) ListFormsRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: String, location: "querystring", location_name: "nextToken")) - ListFormsRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: ListFormsLimit, location: "querystring", location_name: "maxResults")) + ListFormsRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: ListEntityLimit, location: "querystring", location_name: "maxResults")) ListFormsRequest.struct_class = Types::ListFormsRequest ListFormsResponse.add_member(:entities, 
Shapes::ShapeRef.new(shape: FormSummaryList, required: true, location_name: "entities")) ListFormsResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: String, location_name: "nextToken")) ListFormsResponse.struct_class = Types::ListFormsResponse + ListTagsForResourceRequest.add_member(:resource_arn, Shapes::ShapeRef.new(shape: String, required: true, location: "uri", location_name: "resourceArn")) + ListTagsForResourceRequest.struct_class = Types::ListTagsForResourceRequest + + ListTagsForResourceResponse.add_member(:tags, Shapes::ShapeRef.new(shape: Tags, required: true, location_name: "tags")) + ListTagsForResourceResponse.struct_class = Types::ListTagsForResourceResponse + ListThemesRequest.add_member(:app_id, Shapes::ShapeRef.new(shape: String, required: true, location: "uri", location_name: "appId")) ListThemesRequest.add_member(:environment_name, Shapes::ShapeRef.new(shape: String, required: true, location: "uri", location_name: "environmentName")) ListThemesRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: String, location: "querystring", location_name: "nextToken")) - ListThemesRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: ListThemesLimit, location: "querystring", location_name: "maxResults")) + ListThemesRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: ListEntityLimit, location: "querystring", location_name: "maxResults")) ListThemesRequest.struct_class = Types::ListThemesRequest ListThemesResponse.add_member(:entities, Shapes::ShapeRef.new(shape: ThemeSummaryList, required: true, location_name: "entities")) @@ -936,6 +947,14 @@ module ClientApi StrValues.member = Shapes::ShapeRef.new(shape: String) + TagKeyList.member = Shapes::ShapeRef.new(shape: TagKey) + + TagResourceRequest.add_member(:resource_arn, Shapes::ShapeRef.new(shape: String, required: true, location: "uri", location_name: "resourceArn")) + TagResourceRequest.add_member(:tags, Shapes::ShapeRef.new(shape: Tags, required: true, location_name: 
"tags")) + TagResourceRequest.struct_class = Types::TagResourceRequest + + TagResourceResponse.struct_class = Types::TagResourceResponse + Tags.key = Shapes::ShapeRef.new(shape: TagKey) Tags.value = Shapes::ShapeRef.new(shape: TagValue) @@ -976,6 +995,12 @@ module ClientApi UnauthorizedException.add_member(:message, Shapes::ShapeRef.new(shape: String, location_name: "message")) UnauthorizedException.struct_class = Types::UnauthorizedException + UntagResourceRequest.add_member(:resource_arn, Shapes::ShapeRef.new(shape: String, required: true, location: "uri", location_name: "resourceArn")) + UntagResourceRequest.add_member(:tag_keys, Shapes::ShapeRef.new(shape: TagKeyList, required: true, location: "querystring", location_name: "tagKeys")) + UntagResourceRequest.struct_class = Types::UntagResourceRequest + + UntagResourceResponse.struct_class = Types::UntagResourceResponse + UpdateComponentData.add_member(:id, Shapes::ShapeRef.new(shape: Uuid, location_name: "id")) UpdateComponentData.add_member(:name, Shapes::ShapeRef.new(shape: ComponentName, location_name: "name")) UpdateComponentData.add_member(:source_id, Shapes::ShapeRef.new(shape: String, location_name: "sourceId")) @@ -1306,6 +1331,19 @@ module ClientApi ) end) + api.add_operation(:list_tags_for_resource, Seahorse::Model::Operation.new.tap do |o| + o.name = "ListTagsForResource" + o.http_method = "GET" + o.http_request_uri = "/tags/{resourceArn}" + o.input = Shapes::ShapeRef.new(shape: ListTagsForResourceRequest) + o.output = Shapes::ShapeRef.new(shape: ListTagsForResourceResponse) + o.errors << Shapes::ShapeRef.new(shape: UnauthorizedException) + o.errors << Shapes::ShapeRef.new(shape: InternalServerException) + o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException) + o.errors << Shapes::ShapeRef.new(shape: ThrottlingException) + o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) + end) + api.add_operation(:list_themes, Seahorse::Model::Operation.new.tap do |o| o.name = 
"ListThemes" o.http_method = "GET" @@ -1352,6 +1390,32 @@ module ClientApi o.errors << Shapes::ShapeRef.new(shape: ThrottlingException) end) + api.add_operation(:tag_resource, Seahorse::Model::Operation.new.tap do |o| + o.name = "TagResource" + o.http_method = "POST" + o.http_request_uri = "/tags/{resourceArn}" + o.input = Shapes::ShapeRef.new(shape: TagResourceRequest) + o.output = Shapes::ShapeRef.new(shape: TagResourceResponse) + o.errors << Shapes::ShapeRef.new(shape: UnauthorizedException) + o.errors << Shapes::ShapeRef.new(shape: InternalServerException) + o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException) + o.errors << Shapes::ShapeRef.new(shape: ThrottlingException) + o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) + end) + + api.add_operation(:untag_resource, Seahorse::Model::Operation.new.tap do |o| + o.name = "UntagResource" + o.http_method = "DELETE" + o.http_request_uri = "/tags/{resourceArn}" + o.input = Shapes::ShapeRef.new(shape: UntagResourceRequest) + o.output = Shapes::ShapeRef.new(shape: UntagResourceResponse) + o.errors << Shapes::ShapeRef.new(shape: UnauthorizedException) + o.errors << Shapes::ShapeRef.new(shape: InternalServerException) + o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException) + o.errors << Shapes::ShapeRef.new(shape: ThrottlingException) + o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) + end) + api.add_operation(:update_component, Seahorse::Model::Operation.new.tap do |o| o.name = "UpdateComponent" o.http_method = "PATCH" diff --git a/gems/aws-sdk-amplifyuibuilder/lib/aws-sdk-amplifyuibuilder/endpoint_provider.rb b/gems/aws-sdk-amplifyuibuilder/lib/aws-sdk-amplifyuibuilder/endpoint_provider.rb index 1c8b2d2d0d0..a03b67339e4 100644 --- a/gems/aws-sdk-amplifyuibuilder/lib/aws-sdk-amplifyuibuilder/endpoint_provider.rb +++ b/gems/aws-sdk-amplifyuibuilder/lib/aws-sdk-amplifyuibuilder/endpoint_provider.rb @@ -32,7 +32,7 @@ def resolve_endpoint(parameters) raise 
ArgumentError, "FIPS and DualStack are enabled, but this partition does not support one or both" end if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true) - if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS")) + if Aws::Endpoints::Matchers.boolean_equals?(Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS"), true) return Aws::Endpoints::Endpoint.new(url: "https://amplifyuibuilder-fips.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {}) end raise ArgumentError, "FIPS is enabled but this partition does not support FIPS" diff --git a/gems/aws-sdk-amplifyuibuilder/lib/aws-sdk-amplifyuibuilder/endpoints.rb b/gems/aws-sdk-amplifyuibuilder/lib/aws-sdk-amplifyuibuilder/endpoints.rb index 48b49c49d21..38eb4898b90 100644 --- a/gems/aws-sdk-amplifyuibuilder/lib/aws-sdk-amplifyuibuilder/endpoints.rb +++ b/gems/aws-sdk-amplifyuibuilder/lib/aws-sdk-amplifyuibuilder/endpoints.rb @@ -264,6 +264,20 @@ def self.build(context) end end + class ListTagsForResource + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::AmplifyUIBuilder::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + class ListThemes def self.build(context) unless context.config.regional_endpoint @@ -320,6 +334,34 @@ def self.build(context) end end + class TagResource + def self.build(context) + unless context.config.regional_endpoint + endpoint = context.config.endpoint.to_s + end + Aws::AmplifyUIBuilder::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + + class UntagResource + def self.build(context) + unless context.config.regional_endpoint + endpoint = 
context.config.endpoint.to_s + end + Aws::AmplifyUIBuilder::EndpointParameters.new( + region: context.config.region, + use_dual_stack: context.config.use_dualstack_endpoint, + use_fips: context.config.use_fips_endpoint, + endpoint: endpoint, + ) + end + end + class UpdateComponent def self.build(context) unless context.config.regional_endpoint diff --git a/gems/aws-sdk-amplifyuibuilder/lib/aws-sdk-amplifyuibuilder/plugins/endpoints.rb b/gems/aws-sdk-amplifyuibuilder/lib/aws-sdk-amplifyuibuilder/plugins/endpoints.rb index 4b2705b6fc8..b994ccb555b 100644 --- a/gems/aws-sdk-amplifyuibuilder/lib/aws-sdk-amplifyuibuilder/plugins/endpoints.rb +++ b/gems/aws-sdk-amplifyuibuilder/lib/aws-sdk-amplifyuibuilder/plugins/endpoints.rb @@ -94,6 +94,8 @@ def parameters_for_operation(context) Aws::AmplifyUIBuilder::Endpoints::ListComponents.build(context) when :list_forms Aws::AmplifyUIBuilder::Endpoints::ListForms.build(context) + when :list_tags_for_resource + Aws::AmplifyUIBuilder::Endpoints::ListTagsForResource.build(context) when :list_themes Aws::AmplifyUIBuilder::Endpoints::ListThemes.build(context) when :put_metadata_flag @@ -102,6 +104,10 @@ def parameters_for_operation(context) Aws::AmplifyUIBuilder::Endpoints::RefreshToken.build(context) when :start_codegen_job Aws::AmplifyUIBuilder::Endpoints::StartCodegenJob.build(context) + when :tag_resource + Aws::AmplifyUIBuilder::Endpoints::TagResource.build(context) + when :untag_resource + Aws::AmplifyUIBuilder::Endpoints::UntagResource.build(context) when :update_component Aws::AmplifyUIBuilder::Endpoints::UpdateComponent.build(context) when :update_form diff --git a/gems/aws-sdk-amplifyuibuilder/lib/aws-sdk-amplifyuibuilder/types.rb b/gems/aws-sdk-amplifyuibuilder/lib/aws-sdk-amplifyuibuilder/types.rb index 87494a11217..a99816833db 100644 --- a/gems/aws-sdk-amplifyuibuilder/lib/aws-sdk-amplifyuibuilder/types.rb +++ b/gems/aws-sdk-amplifyuibuilder/lib/aws-sdk-amplifyuibuilder/types.rb @@ -2549,6 +2549,31 @@ class 
ListFormsResponse < Struct.new( include Aws::Structure end + # @!attribute [rw] resource_arn + # The Amazon Resource Name (ARN) to use to list tags. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/amplifyuibuilder-2021-08-11/ListTagsForResourceRequest AWS API Documentation + # + class ListTagsForResourceRequest < Struct.new( + :resource_arn) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] tags + # A list of tag key value pairs for a specified Amazon Resource Name + # (ARN). + # @return [Hash] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/amplifyuibuilder-2021-08-11/ListTagsForResourceResponse AWS API Documentation + # + class ListTagsForResourceResponse < Struct.new( + :tags) + SENSITIVE = [] + include Aws::Structure + end + # @!attribute [rw] app_id # The unique ID for the Amplify app. # @return [String] @@ -2998,6 +3023,28 @@ class StartCodegenJobResponse < Struct.new( include Aws::Structure end + # @!attribute [rw] resource_arn + # The Amazon Resource Name (ARN) to use to tag a resource. + # @return [String] + # + # @!attribute [rw] tags + # A list of tag key value pairs for a specified Amazon Resource Name + # (ARN). + # @return [Hash] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/amplifyuibuilder-2021-08-11/TagResourceRequest AWS API Documentation + # + class TagResourceRequest < Struct.new( + :resource_arn, + :tags) + SENSITIVE = [] + include Aws::Structure + end + + # @see http://docs.aws.amazon.com/goto/WebAPI/amplifyuibuilder-2021-08-11/TagResourceResponse AWS API Documentation + # + class TagResourceResponse < Aws::EmptyStructure; end + # A theme is a collection of style settings that apply globally to the # components associated with an Amplify application. # @@ -3147,6 +3194,27 @@ class UnauthorizedException < Struct.new( include Aws::Structure end + # @!attribute [rw] resource_arn + # The Amazon Resource Name (ARN) to use to untag a resource. 
+ # @return [String] + # + # @!attribute [rw] tag_keys + # The tag keys to use to untag a resource. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/amplifyuibuilder-2021-08-11/UntagResourceRequest AWS API Documentation + # + class UntagResourceRequest < Struct.new( + :resource_arn, + :tag_keys) + SENSITIVE = [] + include Aws::Structure + end + + # @see http://docs.aws.amazon.com/goto/WebAPI/amplifyuibuilder-2021-08-11/UntagResourceResponse AWS API Documentation + # + class UntagResourceResponse < Aws::EmptyStructure; end + # Updates and saves all of the information about a component, based on # component ID. # diff --git a/gems/aws-sdk-amplifyuibuilder/sig/client.rbs b/gems/aws-sdk-amplifyuibuilder/sig/client.rbs index fa316d25423..20f617852a2 100644 --- a/gems/aws-sdk-amplifyuibuilder/sig/client.rbs +++ b/gems/aws-sdk-amplifyuibuilder/sig/client.rbs @@ -1188,6 +1188,16 @@ module Aws ) -> _ListFormsResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListFormsResponseSuccess + interface _ListTagsForResourceResponseSuccess + include ::Seahorse::Client::_ResponseSuccess[Types::ListTagsForResourceResponse] + def tags: () -> ::Hash[::String, ::String] + end + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/AmplifyUIBuilder/Client.html#list_tags_for_resource-instance_method + def list_tags_for_resource: ( + resource_arn: ::String + ) -> _ListTagsForResourceResponseSuccess + | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListTagsForResourceResponseSuccess + interface _ListThemesResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::ListThemesResponse] def entities: () -> ::Array[Types::ThemeSummary] @@ -1319,6 +1329,26 @@ module Aws ) -> _StartCodegenJobResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _StartCodegenJobResponseSuccess + interface _TagResourceResponseSuccess + include 
::Seahorse::Client::_ResponseSuccess[Types::TagResourceResponse] + end + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/AmplifyUIBuilder/Client.html#tag_resource-instance_method + def tag_resource: ( + resource_arn: ::String, + tags: Hash[::String, ::String] + ) -> _TagResourceResponseSuccess + | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _TagResourceResponseSuccess + + interface _UntagResourceResponseSuccess + include ::Seahorse::Client::_ResponseSuccess[Types::UntagResourceResponse] + end + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/AmplifyUIBuilder/Client.html#untag_resource-instance_method + def untag_resource: ( + resource_arn: ::String, + tag_keys: Array[::String] + ) -> _UntagResourceResponseSuccess + | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _UntagResourceResponseSuccess + interface _UpdateComponentResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::UpdateComponentResponse] def entity: () -> Types::Component diff --git a/gems/aws-sdk-amplifyuibuilder/sig/types.rbs b/gems/aws-sdk-amplifyuibuilder/sig/types.rbs index b36acfd61e0..547ac586a42 100644 --- a/gems/aws-sdk-amplifyuibuilder/sig/types.rbs +++ b/gems/aws-sdk-amplifyuibuilder/sig/types.rbs @@ -702,6 +702,16 @@ module Aws::AmplifyUIBuilder SENSITIVE: [] end + class ListTagsForResourceRequest + attr_accessor resource_arn: ::String + SENSITIVE: [] + end + + class ListTagsForResourceResponse + attr_accessor tags: ::Hash[::String, ::String] + SENSITIVE: [] + end + class ListThemesRequest attr_accessor app_id: ::String attr_accessor environment_name: ::String @@ -831,6 +841,15 @@ module Aws::AmplifyUIBuilder SENSITIVE: [] end + class TagResourceRequest + attr_accessor resource_arn: ::String + attr_accessor tags: ::Hash[::String, ::String] + SENSITIVE: [] + end + + class TagResourceResponse < Aws::EmptyStructure + end + class Theme attr_accessor app_id: ::String attr_accessor environment_name: ::String @@ -874,6 +893,15 @@ 
module Aws::AmplifyUIBuilder SENSITIVE: [] end + class UntagResourceRequest + attr_accessor resource_arn: ::String + attr_accessor tag_keys: ::Array[::String] + SENSITIVE: [] + end + + class UntagResourceResponse < Aws::EmptyStructure + end + class UpdateComponentData attr_accessor id: ::String attr_accessor name: ::String