diff --git a/apis/billingconductor/2021-07-30/api-2.json b/apis/billingconductor/2021-07-30/api-2.json
index e0d36eca5d6..88745aa0b3a 100644
--- a/apis/billingconductor/2021-07-30/api-2.json
+++ b/apis/billingconductor/2021-07-30/api-2.json
@@ -917,7 +917,8 @@
"BillingGroupArn":{"shape":"BillingGroupArn"},
"BillingPeriodRange":{"shape":"CustomLineItemBillingPeriodRange"},
"Tags":{"shape":"TagMap"},
- "ChargeDetails":{"shape":"CustomLineItemChargeDetails"}
+ "ChargeDetails":{"shape":"CustomLineItemChargeDetails"},
+ "AccountId":{"shape":"AccountId"}
}
},
"CreateCustomLineItemOutput":{
@@ -1088,7 +1089,8 @@
"BillingGroupArn":{"shape":"BillingGroupArn"},
"CreationTime":{"shape":"Instant"},
"LastModifiedTime":{"shape":"Instant"},
- "AssociationSize":{"shape":"NumberOfAssociations"}
+ "AssociationSize":{"shape":"NumberOfAssociations"},
+ "AccountId":{"shape":"AccountId"}
}
},
"CustomLineItemName":{
@@ -1156,7 +1158,8 @@
"StartBillingPeriod":{"shape":"BillingPeriod"},
"EndBillingPeriod":{"shape":"BillingPeriod"},
"Arn":{"shape":"CustomLineItemArn"},
- "StartTime":{"shape":"Instant"}
+ "StartTime":{"shape":"Instant"},
+ "AccountId":{"shape":"AccountId"}
}
},
"DeleteBillingGroupInput":{
@@ -1448,7 +1451,8 @@
"members":{
"Names":{"shape":"CustomLineItemNameList"},
"BillingGroups":{"shape":"BillingGroupArnList"},
- "Arns":{"shape":"CustomLineItemArns"}
+ "Arns":{"shape":"CustomLineItemArns"},
+ "AccountIds":{"shape":"AccountIdList"}
}
},
"ListCustomLineItemsInput":{
@@ -2159,7 +2163,8 @@
"INVALID_SKU_COMBO",
"INVALID_FILTER",
"TOO_MANY_AUTO_ASSOCIATE_BILLING_GROUPS",
- "CANNOT_DELETE_AUTO_ASSOCIATE_BILLING_GROUP"
+ "CANNOT_DELETE_AUTO_ASSOCIATE_BILLING_GROUP",
+ "ILLEGAL_ACCOUNT_ID"
]
}
}
diff --git a/apis/billingconductor/2021-07-30/docs-2.json b/apis/billingconductor/2021-07-30/docs-2.json
index 222e8ed20fb..117688542ef 100644
--- a/apis/billingconductor/2021-07-30/docs-2.json
+++ b/apis/billingconductor/2021-07-30/docs-2.json
@@ -78,6 +78,9 @@
"AccountIdList$member": null,
"BillingGroupListElement$PrimaryAccountId": "
The account ID that serves as the main account in a billing group.
",
"CreateBillingGroupInput$PrimaryAccountId": " The account ID that serves as the main account in a billing group.
",
+ "CreateCustomLineItemInput$AccountId": "The Amazon Web Services account in which this custom line item will be applied to.
",
+ "CustomLineItemListElement$AccountId": "The Amazon Web Services account in which this custom line item will be applied to.
",
+ "CustomLineItemVersionListElement$AccountId": "The Amazon Web Services account in which this custom line item will be applied to.
",
"ListAccountAssociationsFilter$AccountId": "The Amazon Web Services account ID to filter on.
",
"UpdateBillingGroupOutput$PrimaryAccountId": " The account ID that serves as the main account in a billing group.
"
}
@@ -93,7 +96,8 @@
"refs": {
"AccountGrouping$LinkedAccountIds": "The account IDs that make up the billing group. Account IDs must be a part of the consolidated billing family, and not associated with another billing group.
",
"AssociateAccountsInput$AccountIds": " The associating array of account IDs.
",
- "DisassociateAccountsInput$AccountIds": "The array of account IDs to disassociate.
"
+ "DisassociateAccountsInput$AccountIds": "The array of account IDs to disassociate.
",
+ "ListCustomLineItemsFilter$AccountIds": "The Amazon Web Services accounts in which this custom line item will be applied to.
"
}
},
"AccountName": {
diff --git a/apis/billingconductor/2021-07-30/endpoint-rule-set-1.json b/apis/billingconductor/2021-07-30/endpoint-rule-set-1.json
index 260bb262df9..d54dad5ad91 100644
--- a/apis/billingconductor/2021-07-30/endpoint-rule-set-1.json
+++ b/apis/billingconductor/2021-07-30/endpoint-rule-set-1.json
@@ -40,7 +40,6 @@
]
}
],
- "type": "tree",
"rules": [
{
"conditions": [
@@ -83,7 +82,8 @@
},
"type": "endpoint"
}
- ]
+ ],
+ "type": "tree"
},
{
"conditions": [
@@ -96,7 +96,6 @@
]
}
],
- "type": "tree",
"rules": [
{
"conditions": [
@@ -110,7 +109,6 @@
"assign": "PartitionResult"
}
],
- "type": "tree",
"rules": [
{
"conditions": [
@@ -184,7 +182,6 @@
]
}
],
- "type": "tree",
"rules": [
{
"conditions": [
@@ -219,7 +216,6 @@
]
}
],
- "type": "tree",
"rules": [
{
"conditions": [],
@@ -230,14 +226,16 @@
},
"type": "endpoint"
}
- ]
+ ],
+ "type": "tree"
},
{
"conditions": [],
"error": "FIPS and DualStack are enabled, but this partition does not support one or both",
"type": "error"
}
- ]
+ ],
+ "type": "tree"
},
{
"conditions": [
@@ -251,14 +249,12 @@
]
}
],
- "type": "tree",
"rules": [
{
"conditions": [
{
"fn": "booleanEquals",
"argv": [
- true,
{
"fn": "getAttr",
"argv": [
@@ -267,11 +263,11 @@
},
"supportsFIPS"
]
- }
+ },
+ true
]
}
],
- "type": "tree",
"rules": [
{
"conditions": [],
@@ -282,14 +278,16 @@
},
"type": "endpoint"
}
- ]
+ ],
+ "type": "tree"
},
{
"conditions": [],
"error": "FIPS is enabled but this partition does not support FIPS",
"type": "error"
}
- ]
+ ],
+ "type": "tree"
},
{
"conditions": [
@@ -303,7 +301,6 @@
]
}
],
- "type": "tree",
"rules": [
{
"conditions": [
@@ -323,7 +320,6 @@
]
}
],
- "type": "tree",
"rules": [
{
"conditions": [],
@@ -334,14 +330,16 @@
},
"type": "endpoint"
}
- ]
+ ],
+ "type": "tree"
},
{
"conditions": [],
"error": "DualStack is enabled but this partition does not support DualStack",
"type": "error"
}
- ]
+ ],
+ "type": "tree"
},
{
"conditions": [],
@@ -352,9 +350,11 @@
},
"type": "endpoint"
}
- ]
+ ],
+ "type": "tree"
}
- ]
+ ],
+ "type": "tree"
},
{
"conditions": [],
diff --git a/apis/braket/2019-09-01/api-2.json b/apis/braket/2019-09-01/api-2.json
index 4d49ca107af..cf1fa7b5473 100644
--- a/apis/braket/2019-09-01/api-2.json
+++ b/apis/braket/2019-09-01/api-2.json
@@ -253,6 +253,29 @@
"scriptModeConfig":{"shape":"ScriptModeConfig"}
}
},
+ "Association":{
+ "type":"structure",
+ "required":[
+ "arn",
+ "type"
+ ],
+ "members":{
+ "arn":{"shape":"BraketResourceArn"},
+ "type":{"shape":"AssociationType"}
+ }
+ },
+ "AssociationType":{
+ "type":"string",
+ "enum":["RESERVATION_TIME_WINDOW_ARN"]
+ },
+ "Associations":{
+ "type":"list",
+ "member":{"shape":"Association"}
+ },
+ "BraketResourceArn":{
+ "type":"string",
+ "pattern":"^arn:aws[a-z\\-]*:braket:[a-z0-9\\-]*:[0-9]{12}:.*$"
+ },
"CancelJobRequest":{
"type":"structure",
"required":["jobArn"],
@@ -349,6 +372,7 @@
],
"members":{
"algorithmSpecification":{"shape":"AlgorithmSpecification"},
+ "associations":{"shape":"CreateJobRequestAssociationsList"},
"checkpointConfig":{"shape":"JobCheckpointConfig"},
"clientToken":{
"shape":"String64",
@@ -365,6 +389,12 @@
"tags":{"shape":"TagsMap"}
}
},
+ "CreateJobRequestAssociationsList":{
+ "type":"list",
+ "member":{"shape":"Association"},
+ "max":1,
+ "min":0
+ },
"CreateJobRequestInputDataConfigList":{
"type":"list",
"member":{"shape":"InputFileConfig"},
@@ -399,6 +429,7 @@
"shape":"JsonValue",
"jsonvalue":true
},
+ "associations":{"shape":"CreateQuantumTaskRequestAssociationsList"},
"clientToken":{
"shape":"String64",
"idempotencyToken":true
@@ -415,6 +446,12 @@
"tags":{"shape":"TagsMap"}
}
},
+ "CreateQuantumTaskRequestAssociationsList":{
+ "type":"list",
+ "member":{"shape":"Association"},
+ "max":1,
+ "min":0
+ },
"CreateQuantumTaskRequestDeviceParametersString":{
"type":"string",
"max":48000,
@@ -599,6 +636,7 @@
],
"members":{
"algorithmSpecification":{"shape":"AlgorithmSpecification"},
+ "associations":{"shape":"Associations"},
"billableDuration":{"shape":"Integer"},
"checkpointConfig":{"shape":"JobCheckpointConfig"},
"createdAt":{"shape":"SyntheticTimestamp_date_time"},
@@ -655,6 +693,7 @@
"status"
],
"members":{
+ "associations":{"shape":"Associations"},
"createdAt":{"shape":"SyntheticTimestamp_date_time"},
"deviceArn":{"shape":"DeviceArn"},
"deviceParameters":{
@@ -703,7 +742,8 @@
"HyperParametersValueString":{
"type":"string",
"max":2500,
- "min":1
+ "min":1,
+ "pattern":"^.*$"
},
"InputConfigList":{
"type":"list",
diff --git a/apis/braket/2019-09-01/docs-2.json b/apis/braket/2019-09-01/docs-2.json
index 4d39d65ee59..de29dc7c077 100644
--- a/apis/braket/2019-09-01/docs-2.json
+++ b/apis/braket/2019-09-01/docs-2.json
@@ -29,6 +29,33 @@
"GetJobResponse$algorithmSpecification": "Definition of the Amazon Braket job created. Specifies the container image the job uses, information about the Python scripts used for entry and training, and the user-defined metrics used to evaluation the job.
"
}
},
+ "Association": {
+ "base": "The Amazon Braket resource and the association type.
",
+ "refs": {
+ "Associations$member": null,
+ "CreateJobRequestAssociationsList$member": null,
+ "CreateQuantumTaskRequestAssociationsList$member": null
+ }
+ },
+ "AssociationType": {
+ "base": null,
+ "refs": {
+ "Association$type": "The association type for the specified Amazon Braket resource arn.
"
+ }
+ },
+ "Associations": {
+ "base": null,
+ "refs": {
+ "GetJobResponse$associations": "The list of Amazon Braket resources associated with the hybrid job.
",
+ "GetQuantumTaskResponse$associations": "The list of Amazon Braket resources associated with the quantum task.
"
+ }
+ },
+ "BraketResourceArn": {
+ "base": null,
+ "refs": {
+ "Association$arn": "The Amazon Braket resource arn.
"
+ }
+ },
"CancelJobRequest": {
"base": null,
"refs": {
@@ -78,6 +105,12 @@
"refs": {
}
},
+ "CreateJobRequestAssociationsList": {
+ "base": null,
+ "refs": {
+ "CreateJobRequest$associations": "The list of Amazon Braket resources associated with the hybrid job.
"
+ }
+ },
"CreateJobRequestInputDataConfigList": {
"base": null,
"refs": {
@@ -100,6 +133,12 @@
"refs": {
}
},
+ "CreateQuantumTaskRequestAssociationsList": {
+ "base": null,
+ "refs": {
+ "CreateQuantumTaskRequest$associations": "The list of Amazon Braket resources associated with the quantum task.
"
+ }
+ },
"CreateQuantumTaskRequestDeviceParametersString": {
"base": null,
"refs": {
@@ -454,7 +493,7 @@
"CancelQuantumTaskRequest$quantumTaskArn": "The ARN of the task to cancel.
",
"CancelQuantumTaskResponse$quantumTaskArn": "The ARN of the task.
",
"CreateQuantumTaskResponse$quantumTaskArn": "The ARN of the task created by the request.
",
- "GetQuantumTaskRequest$quantumTaskArn": "the ARN of the task to retrieve.
",
+ "GetQuantumTaskRequest$quantumTaskArn": "The ARN of the task to retrieve.
",
"GetQuantumTaskResponse$quantumTaskArn": "The ARN of the task.
",
"QuantumTaskSummary$quantumTaskArn": "The ARN of the task.
"
}
@@ -747,7 +786,7 @@
"GetJobResponse$startedAt": "The date and time that the Amazon Braket job was started.
",
"GetQuantumTaskResponse$createdAt": "The time at which the task was created.
",
"GetQuantumTaskResponse$endedAt": "The time at which the task ended.
",
- "JobEventDetails$timeOfEvent": "TThe type of event that occurred related to the Amazon Braket job.
",
+ "JobEventDetails$timeOfEvent": "The type of event that occurred related to the Amazon Braket job.
",
"JobSummary$createdAt": "The date and time that the Amazon Braket job was created.
",
"JobSummary$endedAt": "The date and time that the Amazon Braket job ended.
",
"JobSummary$startedAt": "The date and time that the Amazon Braket job was started.
",
diff --git a/apis/braket/2019-09-01/endpoint-rule-set-1.json b/apis/braket/2019-09-01/endpoint-rule-set-1.json
index 7a57ae60375..6d366bfcdd1 100644
--- a/apis/braket/2019-09-01/endpoint-rule-set-1.json
+++ b/apis/braket/2019-09-01/endpoint-rule-set-1.json
@@ -40,7 +40,6 @@
]
}
],
- "type": "tree",
"rules": [
{
"conditions": [
@@ -83,7 +82,8 @@
},
"type": "endpoint"
}
- ]
+ ],
+ "type": "tree"
},
{
"conditions": [
@@ -96,7 +96,6 @@
]
}
],
- "type": "tree",
"rules": [
{
"conditions": [
@@ -110,7 +109,6 @@
"assign": "PartitionResult"
}
],
- "type": "tree",
"rules": [
{
"conditions": [
@@ -133,7 +131,6 @@
]
}
],
- "type": "tree",
"rules": [
{
"conditions": [
@@ -168,7 +165,6 @@
]
}
],
- "type": "tree",
"rules": [
{
"conditions": [],
@@ -179,14 +175,16 @@
},
"type": "endpoint"
}
- ]
+ ],
+ "type": "tree"
},
{
"conditions": [],
"error": "FIPS and DualStack are enabled, but this partition does not support one or both",
"type": "error"
}
- ]
+ ],
+ "type": "tree"
},
{
"conditions": [
@@ -200,14 +198,12 @@
]
}
],
- "type": "tree",
"rules": [
{
"conditions": [
{
"fn": "booleanEquals",
"argv": [
- true,
{
"fn": "getAttr",
"argv": [
@@ -216,11 +212,11 @@
},
"supportsFIPS"
]
- }
+ },
+ true
]
}
],
- "type": "tree",
"rules": [
{
"conditions": [],
@@ -231,14 +227,16 @@
},
"type": "endpoint"
}
- ]
+ ],
+ "type": "tree"
},
{
"conditions": [],
"error": "FIPS is enabled but this partition does not support FIPS",
"type": "error"
}
- ]
+ ],
+ "type": "tree"
},
{
"conditions": [
@@ -252,7 +250,6 @@
]
}
],
- "type": "tree",
"rules": [
{
"conditions": [
@@ -272,7 +269,6 @@
]
}
],
- "type": "tree",
"rules": [
{
"conditions": [],
@@ -283,14 +279,16 @@
},
"type": "endpoint"
}
- ]
+ ],
+ "type": "tree"
},
{
"conditions": [],
"error": "DualStack is enabled but this partition does not support DualStack",
"type": "error"
}
- ]
+ ],
+ "type": "tree"
},
{
"conditions": [],
@@ -301,9 +299,11 @@
},
"type": "endpoint"
}
- ]
+ ],
+ "type": "tree"
}
- ]
+ ],
+ "type": "tree"
},
{
"conditions": [],
diff --git a/apis/cloud9/2017-09-23/api-2.json b/apis/cloud9/2017-09-23/api-2.json
index a57b337bf02..aa9cbdd5a4b 100644
--- a/apis/cloud9/2017-09-23/api-2.json
+++ b/apis/cloud9/2017-09-23/api-2.json
@@ -289,7 +289,8 @@
"type":"structure",
"required":[
"name",
- "instanceType"
+ "instanceType",
+ "imageId"
],
"members":{
"name":{"shape":"EnvironmentName"},
@@ -523,7 +524,7 @@
"type":"string",
"max":20,
"min":5,
- "pattern":"^[a-z][1-9][.][a-z0-9]+$"
+ "pattern":"^[a-z]+[1-9][.][a-z0-9]+$"
},
"InternalServerErrorException":{
"type":"structure",
diff --git a/apis/cloud9/2017-09-23/docs-2.json b/apis/cloud9/2017-09-23/docs-2.json
index 67cf30ade09..1324c02158a 100644
--- a/apis/cloud9/2017-09-23/docs-2.json
+++ b/apis/cloud9/2017-09-23/docs-2.json
@@ -232,7 +232,7 @@
"ImageId": {
"base": null,
"refs": {
- "CreateEnvironmentEC2Request$imageId": "The identifier for the Amazon Machine Image (AMI) that's used to create the EC2 instance. To choose an AMI for the instance, you must specify a valid AMI alias or a valid Amazon EC2 Systems Manager (SSM) path.
From December 04, 2023, you will be required to include the imageId
parameter for the CreateEnvironmentEC2
action. This change will be reflected across all direct methods of communicating with the API, such as Amazon Web Services SDK, Amazon Web Services CLI and Amazon Web Services CloudFormation. This change will only affect direct API consumers, and not Cloud9 console users.
From January 22, 2024, Amazon Linux (AL1) will be removed from the list of available image IDs for Cloud9. This is necessary as AL1 will reach the end of maintenance support in December 2023, and as a result will no longer receive security updates. We recommend using Amazon Linux 2 as the AMI to create your environment as it is fully supported. This change will only affect direct API consumers, and not Cloud9 console users.
Since Ubuntu 18.04 has ended standard support as of May 31, 2023, we recommend you choose Ubuntu 22.04.
AMI aliases
-
Amazon Linux (default): amazonlinux-1-x86_64
-
Amazon Linux 2: amazonlinux-2-x86_64
-
Ubuntu 18.04: ubuntu-18.04-x86_64
-
Ubuntu 22.04: ubuntu-22.04-x86_64
SSM paths
-
Amazon Linux (default): resolve:ssm:/aws/service/cloud9/amis/amazonlinux-1-x86_64
-
Amazon Linux 2: resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2-x86_64
-
Ubuntu 18.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-18.04-x86_64
-
Ubuntu 22.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-22.04-x86_64
"
+ "CreateEnvironmentEC2Request$imageId": "The identifier for the Amazon Machine Image (AMI) that's used to create the EC2 instance. To choose an AMI for the instance, you must specify a valid AMI alias or a valid Amazon EC2 Systems Manager (SSM) path.
From December 04, 2023, you will be required to include the imageId
parameter for the CreateEnvironmentEC2
action. This change will be reflected across all direct methods of communicating with the API, such as Amazon Web Services SDK, Amazon Web Services CLI and Amazon Web Services CloudFormation. This change will only affect direct API consumers, and not Cloud9 console users.
From January 22, 2024, Amazon Linux (AL1) will be removed from the list of available image IDs for Cloud9. This is necessary as AL1 will reach the end of maintenance support in December 2023, and as a result will no longer receive security updates. We recommend using Amazon Linux 2 as the AMI to create your environment as it is fully supported. This change will only affect direct API consumers, and not Cloud9 console users.
Since Ubuntu 18.04 has ended standard support as of May 31, 2023, we recommend you choose Ubuntu 22.04.
AMI aliases
-
Amazon Linux: amazonlinux-1-x86_64
-
Amazon Linux 2: amazonlinux-2-x86_64
-
Ubuntu 18.04: ubuntu-18.04-x86_64
-
Ubuntu 22.04: ubuntu-22.04-x86_64
SSM paths
-
Amazon Linux: resolve:ssm:/aws/service/cloud9/amis/amazonlinux-1-x86_64
-
Amazon Linux 2: resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2-x86_64
-
Ubuntu 18.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-18.04-x86_64
-
Ubuntu 22.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-22.04-x86_64
"
}
},
"InstanceType": {
diff --git a/apis/cloudformation/2010-05-15/waiters-2.json b/apis/cloudformation/2010-05-15/waiters-2.json
index a7a05590e9d..cd37c9113ee 100644
--- a/apis/cloudformation/2010-05-15/waiters-2.json
+++ b/apis/cloudformation/2010-05-15/waiters-2.json
@@ -30,6 +30,54 @@
"matcher": "pathAll",
"state": "success"
},
+ {
+ "argument": "Stacks[].StackStatus",
+ "expected": "UPDATE_COMPLETE",
+ "matcher": "pathAll",
+ "state": "success"
+ },
+ {
+ "argument": "Stacks[].StackStatus",
+ "expected": "UPDATE_IN_PROGRESS",
+ "matcher": "pathAll",
+ "state": "success"
+ },
+ {
+ "argument": "Stacks[].StackStatus",
+ "expected": "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS",
+ "matcher": "pathAll",
+ "state": "success"
+ },
+ {
+ "argument": "Stacks[].StackStatus",
+ "expected": "UPDATE_FAILED",
+ "matcher": "pathAll",
+ "state": "success"
+ },
+ {
+ "argument": "Stacks[].StackStatus",
+ "expected": "UPDATE_ROLLBACK_IN_PROGRESS",
+ "matcher": "pathAll",
+ "state": "success"
+ },
+ {
+ "argument": "Stacks[].StackStatus",
+ "expected": "UPDATE_ROLLBACK_FAILED",
+ "matcher": "pathAll",
+ "state": "success"
+ },
+ {
+ "argument": "Stacks[].StackStatus",
+ "expected": "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS",
+ "matcher": "pathAll",
+ "state": "success"
+ },
+ {
+ "argument": "Stacks[].StackStatus",
+ "expected": "UPDATE_ROLLBACK_COMPLETE",
+ "matcher": "pathAll",
+ "state": "success"
+ },
{
"argument": "Stacks[].StackStatus",
"expected": "CREATE_FAILED",
diff --git a/apis/finspace/2021-03-12/api-2.json b/apis/finspace/2021-03-12/api-2.json
index 8154a3ace1f..a5d264fe958 100644
--- a/apis/finspace/2021-03-12/api-2.json
+++ b/apis/finspace/2021-03-12/api-2.json
@@ -1599,6 +1599,7 @@
"KxClusterCodeDeploymentStrategy":{
"type":"string",
"enum":[
+ "NO_RESTART",
"ROLLING",
"FORCE"
]
@@ -1644,7 +1645,8 @@
"enum":[
"HDB",
"RDB",
- "GATEWAY"
+ "GATEWAY",
+ "GP"
]
},
"KxClusters":{
diff --git a/apis/finspace/2021-03-12/docs-2.json b/apis/finspace/2021-03-12/docs-2.json
index 875f48439ee..e980d328e5a 100644
--- a/apis/finspace/2021-03-12/docs-2.json
+++ b/apis/finspace/2021-03-12/docs-2.json
@@ -113,7 +113,7 @@
"ChangeRequests": {
"base": null,
"refs": {
- "CreateKxChangesetRequest$changeRequests": "A list of change request objects that are run in order. A change request object consists of changeType , s3Path, and a dbPath. A changeType can has the following values:
All the change requests require a mandatory dbPath attribute that defines the path within the database directory. The s3Path attribute defines the s3 source file path and is required for a PUT change type.
Here is an example of how you can use the change request object:
[ { \"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/2020.01.02/\", \"dbPath\":\"/2020.01.02/\"}, { \"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/sym\", \"dbPath\":\"/\"}, { \"changeType\": \"DELETE\", \"dbPath\": \"/2020.01.01/\"} ]
In this example, the first request with PUT change type allows you to add files in the given s3Path under the 2020.01.02 partition of the database. The second request with PUT change type allows you to add a single sym file at database root location. The last request with DELETE change type allows you to delete the files under the 2020.01.01 partition of the database.
",
+ "CreateKxChangesetRequest$changeRequests": "A list of change request objects that are run in order. A change request object consists of changeType
, s3Path
, and dbPath
. A changeType can have the following values:
All the change requests require a mandatory dbPath
attribute that defines the path within the database directory. All database paths must start with a leading / and end with a trailing /. The s3Path
attribute defines the s3 source file path and is required for a PUT change type. The s3path
must end with a trailing / if it is a directory and must end without a trailing / if it is a file.
Here are few examples of how you can use the change request object:
-
This request adds a single sym file at database root location.
{ \"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/sym\", \"dbPath\":\"/\"}
-
This request adds files in the given s3Path
under the 2020.01.02 partition of the database.
{ \"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/2020.01.02/\", \"dbPath\":\"/2020.01.02/\"}
-
This request adds files in the given s3Path
under the taq table partition of the database.
[ { \"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/2020.01.02/taq/\", \"dbPath\":\"/2020.01.02/taq/\"}]
-
This request deletes the 2020.01.02 partition of the database.
[{ \"changeType\": \"DELETE\", \"dbPath\": \"/2020.01.02/\"} ]
-
The DELETE request allows you to delete the existing files under the 2020.01.02 partition of the database, and the PUT request adds a new taq table under it.
[ {\"changeType\": \"DELETE\", \"dbPath\":\"/2020.01.02/\"}, {\"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/2020.01.02/taq/\", \"dbPath\":\"/2020.01.02/taq/\"}]
",
"CreateKxChangesetResponse$changeRequests": "A list of change requests.
",
"GetKxChangesetResponse$changeRequests": "A list of change request objects that are run in order.
"
}
@@ -667,7 +667,7 @@
"CreateKxClusterResponse$initializationScript": "Specifies a Q program that will be run at launch of a cluster. It is a relative path within .zip file that contains the custom code, which will be loaded on the cluster. It must include the file name itself. For example, somedir/init.q
.
",
"GetKxClusterResponse$initializationScript": "Specifies a Q program that will be run at launch of a cluster. It is a relative path within .zip file that contains the custom code, which will be loaded on the cluster. It must include the file name itself. For example, somedir/init.q
.
",
"KxCluster$initializationScript": "Specifies a Q program that will be run at launch of a cluster. It is a relative path within .zip file that contains the custom code, which will be loaded on the cluster. It must include the file name itself. For example, somedir/init.q
.
",
- "UpdateKxClusterCodeConfigurationRequest$initializationScript": "Specifies a Q program that will be run at launch of a cluster. It is a relative path within .zip file that contains the custom code, which will be loaded on the cluster. It must include the file name itself. For example, somedir/init.q
.
"
+ "UpdateKxClusterCodeConfigurationRequest$initializationScript": "Specifies a Q program that will be run at launch of a cluster. It is a relative path within .zip file that contains the custom code, which will be loaded on the cluster. It must include the file name itself. For example, somedir/init.q
.
You cannot update this parameter for a NO_RESTART
deployment.
"
}
},
"InternalServerException": {
@@ -761,7 +761,7 @@
"KxClusterCodeDeploymentStrategy": {
"base": null,
"refs": {
- "KxClusterCodeDeploymentConfiguration$deploymentStrategy": " The type of deployment that you want on a cluster.
-
ROLLING – This options updates the cluster by stopping the exiting q process and starting a new q process with updated configuration.
-
FORCE – This option updates the cluster by immediately stopping all the running processes before starting up new ones with the updated configuration.
"
+ "KxClusterCodeDeploymentConfiguration$deploymentStrategy": " The type of deployment that you want on a cluster.
-
ROLLING – This option updates the cluster by stopping the existing q process and starting a new q process with updated configuration.
-
NO_RESTART – This option updates the cluster without stopping the running q process. It is only available for GP
type cluster. This option is quicker as it reduces the turn around time to update configuration on a cluster.
With this deployment mode, you cannot update the initializationScript
and commandLineArguments
parameters.
-
FORCE – This option updates the cluster by immediately stopping all the running processes before starting up new ones with the updated configuration.
"
}
},
"KxClusterDescription": {
@@ -813,11 +813,11 @@
"KxClusterType": {
"base": null,
"refs": {
- "CreateKxClusterRequest$clusterType": "Specifies the type of KDB database that is being created. The following types are available:
-
HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster.
-
RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration
parameter.
-
GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
",
- "CreateKxClusterResponse$clusterType": "Specifies the type of KDB database that is being created. The following types are available:
-
HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster.
-
RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration
parameter.
-
GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
",
- "GetKxClusterResponse$clusterType": "Specifies the type of KDB database that is being created. The following types are available:
-
HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster.
-
RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration
parameter.
-
GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
",
- "KxCluster$clusterType": "Specifies the type of KDB database that is being created. The following types are available:
-
HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster.
-
RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration
parameter.
-
GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
",
- "ListKxClustersRequest$clusterType": "Specifies the type of KDB database that is being created. The following types are available:
-
HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster.
-
RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration
parameter.
-
GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
"
+ "CreateKxClusterRequest$clusterType": "Specifies the type of KDB database that is being created. The following types are available:
-
HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster.
-
RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration
parameter.
-
GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
-
GP – A general purpose cluster allows you to quickly iterate on code during development by granting greater access to system commands and enabling a fast reload of custom code. This cluster type can optionally mount databases including cache and savedown storage. For this cluster type, the node count is fixed at 1. It does not support autoscaling and supports only SINGLE
AZ mode.
",
+ "CreateKxClusterResponse$clusterType": "Specifies the type of KDB database that is being created. The following types are available:
-
HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster.
-
RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration
parameter.
-
GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
-
GP – A general purpose cluster allows you to quickly iterate on code during development by granting greater access to system commands and enabling a fast reload of custom code. This cluster type can optionally mount databases including cache and savedown storage. For this cluster type, the node count is fixed at 1. It does not support autoscaling and supports only SINGLE
AZ mode.
",
+ "GetKxClusterResponse$clusterType": "Specifies the type of KDB database that is being created. The following types are available:
-
HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster.
-
RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration
parameter.
-
GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
-
GP – A general purpose cluster allows you to quickly iterate on code during development by granting greater access to system commands and enabling a fast reload of custom code. This cluster type can optionally mount databases including cache and savedown storage. For this cluster type, the node count is fixed at 1. It does not support autoscaling and supports only SINGLE
AZ mode.
",
+ "KxCluster$clusterType": "Specifies the type of KDB database that is being created. The following types are available:
-
HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster.
-
RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration
parameter.
-
GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
-
GP – A general purpose cluster allows you to quickly iterate on code during development by granting greater access to system commands and enabling a fast reload of custom code. This cluster type can optionally mount databases including cache and savedown storage. For this cluster type, the node count is fixed at 1. It does not support autoscaling and supports only SINGLE
AZ mode.
",
+ "ListKxClustersRequest$clusterType": "Specifies the type of KDB database that is being created. The following types are available:
-
HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster.
-
RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration
parameter.
-
GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
-
GP – A general purpose cluster allows you to quickly iterate on code during development by granting greater access to system commands and enabling a fast reload of custom code. This cluster type can optionally mount databases including cache and savedown storage. For this cluster type, the node count is fixed at 1. It does not support autoscaling and supports only SINGLE
AZ mode.
"
}
},
"KxClusters": {
@@ -850,7 +850,7 @@
"CreateKxClusterRequest$commandLineArguments": "Defines the key-value pairs to make them available inside the cluster.
",
"CreateKxClusterResponse$commandLineArguments": "Defines the key-value pairs to make them available inside the cluster.
",
"GetKxClusterResponse$commandLineArguments": "Defines key-value pairs to make them available inside the cluster.
",
- "UpdateKxClusterCodeConfigurationRequest$commandLineArguments": "Specifies the key-value pairs to make them available inside the cluster.
"
+ "UpdateKxClusterCodeConfigurationRequest$commandLineArguments": "Specifies the key-value pairs to make them available inside the cluster.
You cannot update this parameter for a NO_RESTART
deployment.
"
}
},
"KxDatabaseCacheConfiguration": {
diff --git a/apis/medialive/2017-10-14/api-2.json b/apis/medialive/2017-10-14/api-2.json
index 54a53ceccf0..1b0a3ed80cc 100644
--- a/apis/medialive/2017-10-14/api-2.json
+++ b/apis/medialive/2017-10-14/api-2.json
@@ -4111,6 +4111,49 @@
"members": {
}
},
+ "ColorCorrection": {
+ "type": "structure",
+ "members": {
+ "InputColorSpace": {
+ "shape": "ColorSpace",
+ "locationName": "inputColorSpace"
+ },
+ "OutputColorSpace": {
+ "shape": "ColorSpace",
+ "locationName": "outputColorSpace"
+ },
+ "Uri": {
+ "shape": "__string",
+ "locationName": "uri"
+ }
+ },
+ "required": [
+ "OutputColorSpace",
+ "InputColorSpace",
+ "Uri"
+ ]
+ },
+ "ColorCorrectionSettings": {
+ "type": "structure",
+ "members": {
+ "GlobalColorCorrections": {
+ "shape": "__listOfColorCorrection",
+ "locationName": "globalColorCorrections"
+ }
+ },
+ "required": [
+ "GlobalColorCorrections"
+ ]
+ },
+ "ColorSpace": {
+ "type": "string",
+ "enum": [
+ "HDR10",
+ "HLG_2020",
+ "REC_601",
+ "REC_709"
+ ]
+ },
"ColorSpacePassthroughSettings": {
"type": "structure",
"members": {
@@ -6354,6 +6397,10 @@
"ThumbnailConfiguration": {
"shape": "ThumbnailConfiguration",
"locationName": "thumbnailConfiguration"
+ },
+ "ColorCorrectionSettings": {
+ "shape": "ColorCorrectionSettings",
+ "locationName": "colorCorrectionSettings"
}
},
"required": [
@@ -14503,6 +14550,12 @@
"shape": "ChannelSummary"
}
},
+ "__listOfColorCorrection": {
+ "type": "list",
+ "member": {
+ "shape": "ColorCorrection"
+ }
+ },
"__listOfFailoverCondition": {
"type": "list",
"member": {
diff --git a/apis/medialive/2017-10-14/docs-2.json b/apis/medialive/2017-10-14/docs-2.json
index a4deb8957db..6b20cd9ab4e 100644
--- a/apis/medialive/2017-10-14/docs-2.json
+++ b/apis/medialive/2017-10-14/docs-2.json
@@ -656,6 +656,25 @@
"refs": {
}
},
+ "ColorCorrection": {
+ "base": "Property of ColorCorrectionSettings. Used for custom color space conversion. The object identifies one 3D LUT file and specifies the input/output color space combination that the file will be used for.",
+ "refs": {
+ "__listOfColorCorrection$member": null
+ }
+ },
+ "ColorCorrectionSettings": {
+ "base": "Property of encoderSettings. Controls color conversion when you are using 3D LUT files to perform color conversion on video.",
+ "refs": {
+ "EncoderSettings$ColorCorrectionSettings": "Color correction settings"
+ }
+ },
+ "ColorSpace": {
+ "base": "Property of colorCorrections. When you are using 3D LUT files to perform color conversion on video, these are the supported color spaces.",
+ "refs": {
+ "ColorCorrection$InputColorSpace": "The color space of the input.",
+ "ColorCorrection$OutputColorSpace": "The color space of the output."
+ }
+ },
"ColorSpacePassthroughSettings": {
"base": "Passthrough applies no color space conversion to the output",
"refs": {
@@ -4330,6 +4349,12 @@
"ListChannelsResultModel$Channels": null
}
},
+ "__listOfColorCorrection": {
+ "base": null,
+ "refs": {
+ "ColorCorrectionSettings$GlobalColorCorrections": "An array of colorCorrections that applies when you are using 3D LUT files to perform color conversion on video. Each colorCorrection contains one 3D LUT file (that defines the color mapping for converting an input color space to an output color space), and the input/output combination that this 3D LUT file applies to. MediaLive reads the color space in the input metadata, determines the color space that you have specified for the output, and finds and uses the LUT file that applies to this combination."
+ }
+ },
"__listOfFailoverCondition": {
"base": null,
"refs": {
@@ -4691,6 +4716,7 @@
"ChannelSummary$Name": "The name of the channel. (user-mutable)",
"ChannelSummary$RoleArn": "The Amazon Resource Name (ARN) of the role assumed when running the Channel.",
"ClaimDeviceRequest$Id": "The id of the device you want to claim.",
+ "ColorCorrection$Uri": "The URI of the 3D LUT file. The protocol must be 's3:' or 's3ssl:'.",
"CreateChannel$Name": "Name of channel.",
"CreateChannel$RequestId": "Unique request ID to be specified. This is needed to prevent retries from\ncreating multiple resources.\n",
"CreateChannel$Reserved": "Deprecated field that's only usable by whitelisted customers.",
diff --git a/apis/servicecatalog-appregistry/2020-06-24/docs-2.json b/apis/servicecatalog-appregistry/2020-06-24/docs-2.json
index 4b0b7117e83..16201b82c93 100644
--- a/apis/servicecatalog-appregistry/2020-06-24/docs-2.json
+++ b/apis/servicecatalog-appregistry/2020-06-24/docs-2.json
@@ -3,7 +3,7 @@
"service": " Amazon Web Services Service Catalog AppRegistry enables organizations to understand the application context of their Amazon Web Services resources. AppRegistry provides a repository of your applications, their resources, and the application metadata that you use within your enterprise.
",
"operations": {
"AssociateAttributeGroup": "Associates an attribute group with an application to augment the application's metadata with the group's attributes. This feature enables applications to be described with user-defined details that are machine-readable, such as third-party integrations.
",
- "AssociateResource": " Associates a resource with an application. The resource can be specified by its ARN or name. The application can be specified by ARN, ID, or name.
Minimum permissions
You must have the following permissions to associate a resource using the OPTIONS
parameter set to APPLY_APPLICATION_TAG
.
-
tag:GetResources
-
tag:TagResources
You must also have these additional permissions if you don't use the AWSServiceCatalogAppRegistryFullAccess
policy. For more information, see AWSServiceCatalogAppRegistryFullAccess in the AppRegistry Administrator Guide.
-
resource-groups:DisassociateResource
-
cloudformation:UpdateStack
-
cloudformation:DescribeStacks
In addition, you must have the tagging permission defined by the Amazon Web Services service that creates the resource. For more information, see TagResources in the Resource Groups Tagging API Reference.
",
+ "AssociateResource": " Associates a resource with an application. The resource can be specified by its ARN or name. The application can be specified by ARN, ID, or name.
Minimum permissions
You must have the following permissions to associate a resource using the OPTIONS
parameter set to APPLY_APPLICATION_TAG
.
-
tag:GetResources
-
tag:TagResources
You must also have these additional permissions if you don't use the AWSServiceCatalogAppRegistryFullAccess
policy. For more information, see AWSServiceCatalogAppRegistryFullAccess in the AppRegistry Administrator Guide.
-
resource-groups:AssociateResource
-
cloudformation:UpdateStack
-
cloudformation:DescribeStacks
In addition, you must have the tagging permission defined by the Amazon Web Services service that creates the resource. For more information, see TagResources in the Resource Groups Tagging API Reference.
",
"CreateApplication": "Creates a new application that is the top-level node in a hierarchy of related cloud resource abstractions.
",
"CreateAttributeGroup": "Creates a new attribute group as a container for user-defined attributes. This feature enables users to have full control over their cloud application's metadata in a rich machine-readable format to facilitate integration with automated workflows and third-party tools.
",
"DeleteApplication": "Deletes an application that is specified either by its application ID, name, or ARN. All associated attribute groups and resources must be disassociated from it before deleting an application.
",
diff --git a/apis/servicecatalog-appregistry/2020-06-24/endpoint-tests-1.json b/apis/servicecatalog-appregistry/2020-06-24/endpoint-tests-1.json
index 39bcc0acd0f..b38c5782e7e 100644
--- a/apis/servicecatalog-appregistry/2020-06-24/endpoint-tests-1.json
+++ b/apis/servicecatalog-appregistry/2020-06-24/endpoint-tests-1.json
@@ -646,17 +646,6 @@
"expect": {
"error": "Invalid Configuration: Missing Region"
}
- },
- {
- "documentation": "Partition doesn't support DualStack",
- "expect": {
- "error": "DualStack is enabled but this partition does not support DualStack"
- },
- "params": {
- "Region": "us-isob-east-1",
- "UseFIPS": false,
- "UseDualStack": true
- }
}
],
"version": "1.0"
diff --git a/gems/aws-partitions/CHANGELOG.md b/gems/aws-partitions/CHANGELOG.md
index 5b5290af226..024b7ab82eb 100644
--- a/gems/aws-partitions/CHANGELOG.md
+++ b/gems/aws-partitions/CHANGELOG.md
@@ -1,6 +1,11 @@
Unreleased Changes
------------------
+1.863.0 (2023-12-04)
+------------------
+
+* Feature - Updated the partitions source data that determines the AWS service regions and endpoints.
+
1.862.0 (2023-11-30)
------------------
diff --git a/gems/aws-partitions/VERSION b/gems/aws-partitions/VERSION
index 34c49611d78..9d1a018019e 100644
--- a/gems/aws-partitions/VERSION
+++ b/gems/aws-partitions/VERSION
@@ -1 +1 @@
-1.862.0
+1.863.0
diff --git a/gems/aws-partitions/partitions.json b/gems/aws-partitions/partitions.json
index 31291963ca1..a6d021a14f8 100644
--- a/gems/aws-partitions/partitions.json
+++ b/gems/aws-partitions/partitions.json
@@ -15873,6 +15873,30 @@
"deprecated" : true,
"hostname" : "signer-fips.us-west-2.amazonaws.com"
},
+ "fips-verification-us-east-1" : {
+ "credentialScope" : {
+ "region" : "us-east-1"
+ },
+ "hostname" : "verification.signer-fips.us-east-1.amazonaws.com"
+ },
+ "fips-verification-us-east-2" : {
+ "credentialScope" : {
+ "region" : "us-east-2"
+ },
+ "hostname" : "verification.signer-fips.us-east-2.amazonaws.com"
+ },
+ "fips-verification-us-west-1" : {
+ "credentialScope" : {
+ "region" : "us-west-1"
+ },
+ "hostname" : "verification.signer-fips.us-west-1.amazonaws.com"
+ },
+ "fips-verification-us-west-2" : {
+ "credentialScope" : {
+ "region" : "us-west-2"
+ },
+ "hostname" : "verification.signer-fips.us-west-2.amazonaws.com"
+ },
"me-south-1" : { },
"sa-east-1" : { },
"us-east-1" : {
@@ -15898,6 +15922,126 @@
"hostname" : "signer-fips.us-west-2.amazonaws.com",
"tags" : [ "fips" ]
} ]
+ },
+ "verification-af-south-1" : {
+ "credentialScope" : {
+ "region" : "af-south-1"
+ },
+ "hostname" : "verification.signer.af-south-1.amazonaws.com"
+ },
+ "verification-ap-east-1" : {
+ "credentialScope" : {
+ "region" : "ap-east-1"
+ },
+ "hostname" : "verification.signer.ap-east-1.amazonaws.com"
+ },
+ "verification-ap-northeast-1" : {
+ "credentialScope" : {
+ "region" : "ap-northeast-1"
+ },
+ "hostname" : "verification.signer.ap-northeast-1.amazonaws.com"
+ },
+ "verification-ap-northeast-2" : {
+ "credentialScope" : {
+ "region" : "ap-northeast-2"
+ },
+ "hostname" : "verification.signer.ap-northeast-2.amazonaws.com"
+ },
+ "verification-ap-south-1" : {
+ "credentialScope" : {
+ "region" : "ap-south-1"
+ },
+ "hostname" : "verification.signer.ap-south-1.amazonaws.com"
+ },
+ "verification-ap-southeast-1" : {
+ "credentialScope" : {
+ "region" : "ap-southeast-1"
+ },
+ "hostname" : "verification.signer.ap-southeast-1.amazonaws.com"
+ },
+ "verification-ap-southeast-2" : {
+ "credentialScope" : {
+ "region" : "ap-southeast-2"
+ },
+ "hostname" : "verification.signer.ap-southeast-2.amazonaws.com"
+ },
+ "verification-ca-central-1" : {
+ "credentialScope" : {
+ "region" : "ca-central-1"
+ },
+ "hostname" : "verification.signer.ca-central-1.amazonaws.com"
+ },
+ "verification-eu-central-1" : {
+ "credentialScope" : {
+ "region" : "eu-central-1"
+ },
+ "hostname" : "verification.signer.eu-central-1.amazonaws.com"
+ },
+ "verification-eu-north-1" : {
+ "credentialScope" : {
+ "region" : "eu-north-1"
+ },
+ "hostname" : "verification.signer.eu-north-1.amazonaws.com"
+ },
+ "verification-eu-south-1" : {
+ "credentialScope" : {
+ "region" : "eu-south-1"
+ },
+ "hostname" : "verification.signer.eu-south-1.amazonaws.com"
+ },
+ "verification-eu-west-1" : {
+ "credentialScope" : {
+ "region" : "eu-west-1"
+ },
+ "hostname" : "verification.signer.eu-west-1.amazonaws.com"
+ },
+ "verification-eu-west-2" : {
+ "credentialScope" : {
+ "region" : "eu-west-2"
+ },
+ "hostname" : "verification.signer.eu-west-2.amazonaws.com"
+ },
+ "verification-eu-west-3" : {
+ "credentialScope" : {
+ "region" : "eu-west-3"
+ },
+ "hostname" : "verification.signer.eu-west-3.amazonaws.com"
+ },
+ "verification-me-south-1" : {
+ "credentialScope" : {
+ "region" : "me-south-1"
+ },
+ "hostname" : "verification.signer.me-south-1.amazonaws.com"
+ },
+ "verification-sa-east-1" : {
+ "credentialScope" : {
+ "region" : "sa-east-1"
+ },
+ "hostname" : "verification.signer.sa-east-1.amazonaws.com"
+ },
+ "verification-us-east-1" : {
+ "credentialScope" : {
+ "region" : "us-east-1"
+ },
+ "hostname" : "verification.signer.us-east-1.amazonaws.com"
+ },
+ "verification-us-east-2" : {
+ "credentialScope" : {
+ "region" : "us-east-2"
+ },
+ "hostname" : "verification.signer.us-east-2.amazonaws.com"
+ },
+ "verification-us-west-1" : {
+ "credentialScope" : {
+ "region" : "us-west-1"
+ },
+ "hostname" : "verification.signer.us-west-1.amazonaws.com"
+ },
+ "verification-us-west-2" : {
+ "credentialScope" : {
+ "region" : "us-west-2"
+ },
+ "hostname" : "verification.signer.us-west-2.amazonaws.com"
}
}
},
@@ -20228,7 +20372,19 @@
"signer" : {
"endpoints" : {
"cn-north-1" : { },
- "cn-northwest-1" : { }
+ "cn-northwest-1" : { },
+ "verification-cn-north-1" : {
+ "credentialScope" : {
+ "region" : "cn-north-1"
+ },
+ "hostname" : "verification.signer.cn-north-1.amazonaws.com.cn"
+ },
+ "verification-cn-northwest-1" : {
+ "credentialScope" : {
+ "region" : "cn-northwest-1"
+ },
+ "hostname" : "verification.signer.cn-northwest-1.amazonaws.com.cn"
+ }
}
},
"sms" : {
@@ -25723,8 +25879,32 @@
},
"redshift" : {
"endpoints" : {
- "us-iso-east-1" : { },
- "us-iso-west-1" : { }
+ "fips-us-iso-east-1" : {
+ "credentialScope" : {
+ "region" : "us-iso-east-1"
+ },
+ "deprecated" : true,
+ "hostname" : "redshift-fips.us-iso-east-1.c2s.ic.gov"
+ },
+ "fips-us-iso-west-1" : {
+ "credentialScope" : {
+ "region" : "us-iso-west-1"
+ },
+ "deprecated" : true,
+ "hostname" : "redshift-fips.us-iso-west-1.c2s.ic.gov"
+ },
+ "us-iso-east-1" : {
+ "variants" : [ {
+ "hostname" : "redshift-fips.us-iso-east-1.c2s.ic.gov",
+ "tags" : [ "fips" ]
+ } ]
+ },
+ "us-iso-west-1" : {
+ "variants" : [ {
+ "hostname" : "redshift-fips.us-iso-west-1.c2s.ic.gov",
+ "tags" : [ "fips" ]
+ } ]
+ }
}
},
"resource-groups" : {
@@ -26262,7 +26442,19 @@
},
"redshift" : {
"endpoints" : {
- "us-isob-east-1" : { }
+ "fips-us-isob-east-1" : {
+ "credentialScope" : {
+ "region" : "us-isob-east-1"
+ },
+ "deprecated" : true,
+ "hostname" : "redshift-fips.us-isob-east-1.sc2s.sgov.gov"
+ },
+ "us-isob-east-1" : {
+ "variants" : [ {
+ "hostname" : "redshift-fips.us-isob-east-1.sc2s.sgov.gov",
+ "tags" : [ "fips" ]
+ } ]
+ }
}
},
"resource-groups" : {
diff --git a/gems/aws-sdk-appregistry/CHANGELOG.md b/gems/aws-sdk-appregistry/CHANGELOG.md
index 01f19d0fc3d..8dd3d3b0f32 100644
--- a/gems/aws-sdk-appregistry/CHANGELOG.md
+++ b/gems/aws-sdk-appregistry/CHANGELOG.md
@@ -1,6 +1,11 @@
Unreleased Changes
------------------
+1.31.0 (2023-12-04)
+------------------
+
+* Feature - Documentation-only updates for Dawn
+
1.30.0 (2023-11-28)
------------------
diff --git a/gems/aws-sdk-appregistry/VERSION b/gems/aws-sdk-appregistry/VERSION
index 034552a83ee..34aae156b19 100644
--- a/gems/aws-sdk-appregistry/VERSION
+++ b/gems/aws-sdk-appregistry/VERSION
@@ -1 +1 @@
-1.30.0
+1.31.0
diff --git a/gems/aws-sdk-appregistry/lib/aws-sdk-appregistry.rb b/gems/aws-sdk-appregistry/lib/aws-sdk-appregistry.rb
index 7901419d6fa..9d34df0f6a0 100644
--- a/gems/aws-sdk-appregistry/lib/aws-sdk-appregistry.rb
+++ b/gems/aws-sdk-appregistry/lib/aws-sdk-appregistry.rb
@@ -52,6 +52,6 @@
# @!group service
module Aws::AppRegistry
- GEM_VERSION = '1.30.0'
+ GEM_VERSION = '1.31.0'
end
diff --git a/gems/aws-sdk-appregistry/lib/aws-sdk-appregistry/client.rb b/gems/aws-sdk-appregistry/lib/aws-sdk-appregistry/client.rb
index 1f10733f715..d0e478ebbc8 100644
--- a/gems/aws-sdk-appregistry/lib/aws-sdk-appregistry/client.rb
+++ b/gems/aws-sdk-appregistry/lib/aws-sdk-appregistry/client.rb
@@ -444,7 +444,7 @@ def associate_attribute_group(params = {}, options = {})
# see [AWSServiceCatalogAppRegistryFullAccess][1] in the AppRegistry
# Administrator Guide.
#
- # * `resource-groups:DisassociateResource`
+ # * `resource-groups:AssociateResource`
#
# * `cloudformation:UpdateStack`
#
@@ -1522,7 +1522,7 @@ def build_request(operation_name, params = {})
params: params,
config: config)
context[:gem_name] = 'aws-sdk-appregistry'
- context[:gem_version] = '1.30.0'
+ context[:gem_version] = '1.31.0'
Seahorse::Client::Request.new(handlers, context)
end
diff --git a/gems/aws-sdk-appregistry/spec/endpoint_provider_spec.rb b/gems/aws-sdk-appregistry/spec/endpoint_provider_spec.rb
index 76c2dbc560f..3543086e3b8 100644
--- a/gems/aws-sdk-appregistry/spec/endpoint_provider_spec.rb
+++ b/gems/aws-sdk-appregistry/spec/endpoint_provider_spec.rb
@@ -721,18 +721,5 @@ module Aws::AppRegistry
end
end
- context 'Partition doesn't support DualStack' do
- let(:expected) do
- {"error"=>"DualStack is enabled but this partition does not support DualStack"}
- end
-
- it 'produces the expected output from the EndpointProvider' do
- params = EndpointParameters.new(**{:region=>"us-isob-east-1", :use_fips=>false, :use_dual_stack=>true})
- expect do
- subject.resolve_endpoint(params)
- end.to raise_error(ArgumentError, expected['error'])
- end
- end
-
end
end
diff --git a/gems/aws-sdk-billingconductor/CHANGELOG.md b/gems/aws-sdk-billingconductor/CHANGELOG.md
index 39fa9a36380..aeec0a8afad 100644
--- a/gems/aws-sdk-billingconductor/CHANGELOG.md
+++ b/gems/aws-sdk-billingconductor/CHANGELOG.md
@@ -1,6 +1,11 @@
Unreleased Changes
------------------
+1.18.0 (2023-12-04)
+------------------
+
+* Feature - This release adds the ability to specify a linked account of the billing group for the custom line item resource.
+
1.17.0 (2023-11-28)
------------------
diff --git a/gems/aws-sdk-billingconductor/VERSION b/gems/aws-sdk-billingconductor/VERSION
index 092afa15df4..84cc529467b 100644
--- a/gems/aws-sdk-billingconductor/VERSION
+++ b/gems/aws-sdk-billingconductor/VERSION
@@ -1 +1 @@
-1.17.0
+1.18.0
diff --git a/gems/aws-sdk-billingconductor/lib/aws-sdk-billingconductor.rb b/gems/aws-sdk-billingconductor/lib/aws-sdk-billingconductor.rb
index 89926a6102b..04edfaab3c1 100644
--- a/gems/aws-sdk-billingconductor/lib/aws-sdk-billingconductor.rb
+++ b/gems/aws-sdk-billingconductor/lib/aws-sdk-billingconductor.rb
@@ -53,6 +53,6 @@
# @!group service
module Aws::BillingConductor
- GEM_VERSION = '1.17.0'
+ GEM_VERSION = '1.18.0'
end
diff --git a/gems/aws-sdk-billingconductor/lib/aws-sdk-billingconductor/client.rb b/gems/aws-sdk-billingconductor/lib/aws-sdk-billingconductor/client.rb
index f1f305c23ab..36e22782b00 100644
--- a/gems/aws-sdk-billingconductor/lib/aws-sdk-billingconductor/client.rb
+++ b/gems/aws-sdk-billingconductor/lib/aws-sdk-billingconductor/client.rb
@@ -658,6 +658,10 @@ def create_billing_group(params = {}, options = {})
# A `CustomLineItemChargeDetails` that describes the charge details for
# a custom line item.
#
+ # @option params [String] :account_id
+ # The Amazon Web Services account in which this custom line item will be
+ # applied to.
+ #
# @return [Types::CreateCustomLineItemOutput] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::CreateCustomLineItemOutput#arn #arn} => String
@@ -693,6 +697,7 @@ def create_billing_group(params = {}, options = {})
# },
# ],
# },
+ # account_id: "AccountId",
# })
#
# @example Response structure
@@ -1297,6 +1302,7 @@ def list_billing_groups(params = {}, options = {})
# resp.custom_line_item_versions[0].end_billing_period #=> String
# resp.custom_line_item_versions[0].arn #=> String
# resp.custom_line_item_versions[0].start_time #=> Integer
+ # resp.custom_line_item_versions[0].account_id #=> String
# resp.next_token #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/billingconductor-2021-07-30/ListCustomLineItemVersions AWS API Documentation
@@ -1344,6 +1350,7 @@ def list_custom_line_item_versions(params = {}, options = {})
# names: ["CustomLineItemName"],
# billing_groups: ["BillingGroupArn"],
# arns: ["CustomLineItemArn"],
+ # account_ids: ["AccountId"],
# },
# })
#
@@ -1367,6 +1374,7 @@ def list_custom_line_item_versions(params = {}, options = {})
# resp.custom_line_items[0].creation_time #=> Integer
# resp.custom_line_items[0].last_modified_time #=> Integer
# resp.custom_line_items[0].association_size #=> Integer
+ # resp.custom_line_items[0].account_id #=> String
# resp.next_token #=> String
#
# @see http://docs.aws.amazon.com/goto/WebAPI/billingconductor-2021-07-30/ListCustomLineItems AWS API Documentation
@@ -2041,7 +2049,7 @@ def build_request(operation_name, params = {})
params: params,
config: config)
context[:gem_name] = 'aws-sdk-billingconductor'
- context[:gem_version] = '1.17.0'
+ context[:gem_version] = '1.18.0'
Seahorse::Client::Request.new(handlers, context)
end
diff --git a/gems/aws-sdk-billingconductor/lib/aws-sdk-billingconductor/client_api.rb b/gems/aws-sdk-billingconductor/lib/aws-sdk-billingconductor/client_api.rb
index 8623cd84249..1f949e8cde4 100644
--- a/gems/aws-sdk-billingconductor/lib/aws-sdk-billingconductor/client_api.rb
+++ b/gems/aws-sdk-billingconductor/lib/aws-sdk-billingconductor/client_api.rb
@@ -335,6 +335,7 @@ module ClientApi
CreateCustomLineItemInput.add_member(:billing_period_range, Shapes::ShapeRef.new(shape: CustomLineItemBillingPeriodRange, location_name: "BillingPeriodRange"))
CreateCustomLineItemInput.add_member(:tags, Shapes::ShapeRef.new(shape: TagMap, location_name: "Tags"))
CreateCustomLineItemInput.add_member(:charge_details, Shapes::ShapeRef.new(shape: CustomLineItemChargeDetails, required: true, location_name: "ChargeDetails"))
+ CreateCustomLineItemInput.add_member(:account_id, Shapes::ShapeRef.new(shape: AccountId, location_name: "AccountId"))
CreateCustomLineItemInput.struct_class = Types::CreateCustomLineItemInput
CreateCustomLineItemOutput.add_member(:arn, Shapes::ShapeRef.new(shape: CustomLineItemArn, location_name: "Arn"))
@@ -406,6 +407,7 @@ module ClientApi
CustomLineItemListElement.add_member(:creation_time, Shapes::ShapeRef.new(shape: Instant, location_name: "CreationTime"))
CustomLineItemListElement.add_member(:last_modified_time, Shapes::ShapeRef.new(shape: Instant, location_name: "LastModifiedTime"))
CustomLineItemListElement.add_member(:association_size, Shapes::ShapeRef.new(shape: NumberOfAssociations, location_name: "AssociationSize"))
+ CustomLineItemListElement.add_member(:account_id, Shapes::ShapeRef.new(shape: AccountId, location_name: "AccountId"))
CustomLineItemListElement.struct_class = Types::CustomLineItemListElement
CustomLineItemNameList.member = Shapes::ShapeRef.new(shape: CustomLineItemName)
@@ -429,6 +431,7 @@ module ClientApi
CustomLineItemVersionListElement.add_member(:end_billing_period, Shapes::ShapeRef.new(shape: BillingPeriod, location_name: "EndBillingPeriod"))
CustomLineItemVersionListElement.add_member(:arn, Shapes::ShapeRef.new(shape: CustomLineItemArn, location_name: "Arn"))
CustomLineItemVersionListElement.add_member(:start_time, Shapes::ShapeRef.new(shape: Instant, location_name: "StartTime"))
+ CustomLineItemVersionListElement.add_member(:account_id, Shapes::ShapeRef.new(shape: AccountId, location_name: "AccountId"))
CustomLineItemVersionListElement.struct_class = Types::CustomLineItemVersionListElement
DeleteBillingGroupInput.add_member(:arn, Shapes::ShapeRef.new(shape: BillingGroupArn, required: true, location_name: "Arn"))
@@ -570,6 +573,7 @@ module ClientApi
ListCustomLineItemsFilter.add_member(:names, Shapes::ShapeRef.new(shape: CustomLineItemNameList, location_name: "Names"))
ListCustomLineItemsFilter.add_member(:billing_groups, Shapes::ShapeRef.new(shape: BillingGroupArnList, location_name: "BillingGroups"))
ListCustomLineItemsFilter.add_member(:arns, Shapes::ShapeRef.new(shape: CustomLineItemArns, location_name: "Arns"))
+ ListCustomLineItemsFilter.add_member(:account_ids, Shapes::ShapeRef.new(shape: AccountIdList, location_name: "AccountIds"))
ListCustomLineItemsFilter.struct_class = Types::ListCustomLineItemsFilter
ListCustomLineItemsInput.add_member(:billing_period, Shapes::ShapeRef.new(shape: BillingPeriod, location_name: "BillingPeriod"))
diff --git a/gems/aws-sdk-billingconductor/lib/aws-sdk-billingconductor/endpoint_provider.rb b/gems/aws-sdk-billingconductor/lib/aws-sdk-billingconductor/endpoint_provider.rb
index 2df6e2a43fb..fecef04189e 100644
--- a/gems/aws-sdk-billingconductor/lib/aws-sdk-billingconductor/endpoint_provider.rb
+++ b/gems/aws-sdk-billingconductor/lib/aws-sdk-billingconductor/endpoint_provider.rb
@@ -35,7 +35,7 @@ def resolve_endpoint(parameters)
raise ArgumentError, "FIPS and DualStack are enabled, but this partition does not support one or both"
end
if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true)
- if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS"))
+ if Aws::Endpoints::Matchers.boolean_equals?(Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS"), true)
return Aws::Endpoints::Endpoint.new(url: "https://billingconductor-fips.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {})
end
raise ArgumentError, "FIPS is enabled but this partition does not support FIPS"
diff --git a/gems/aws-sdk-billingconductor/lib/aws-sdk-billingconductor/types.rb b/gems/aws-sdk-billingconductor/lib/aws-sdk-billingconductor/types.rb
index e2e073e1a0c..79da9f24fe1 100644
--- a/gems/aws-sdk-billingconductor/lib/aws-sdk-billingconductor/types.rb
+++ b/gems/aws-sdk-billingconductor/lib/aws-sdk-billingconductor/types.rb
@@ -515,6 +515,11 @@ class CreateBillingGroupOutput < Struct.new(
# for a custom line item.
# @return [Types::CustomLineItemChargeDetails]
#
+ # @!attribute [rw] account_id
+ # The Amazon Web Services account in which this custom line item will
+ # be applied to.
+ # @return [String]
+ #
# @see http://docs.aws.amazon.com/goto/WebAPI/billingconductor-2021-07-30/CreateCustomLineItemInput AWS API Documentation
#
class CreateCustomLineItemInput < Struct.new(
@@ -524,7 +529,8 @@ class CreateCustomLineItemInput < Struct.new(
:billing_group_arn,
:billing_period_range,
:tags,
- :charge_details)
+ :charge_details,
+ :account_id)
SENSITIVE = [:name, :description]
include Aws::Structure
end
@@ -837,6 +843,11 @@ class CustomLineItemFlatChargeDetails < Struct.new(
# The number of resources that are associated to the custom line item.
# @return [Integer]
#
+ # @!attribute [rw] account_id
+ # The Amazon Web Services account to which this custom line item
+ # will be applied.
+ # @return [String]
+ #
# @see http://docs.aws.amazon.com/goto/WebAPI/billingconductor-2021-07-30/CustomLineItemListElement AWS API Documentation
#
class CustomLineItemListElement < Struct.new(
@@ -849,7 +860,8 @@ class CustomLineItemListElement < Struct.new(
:billing_group_arn,
:creation_time,
:last_modified_time,
- :association_size)
+ :association_size,
+ :account_id)
SENSITIVE = [:name, :description]
include Aws::Structure
end
@@ -934,6 +946,11 @@ class CustomLineItemPercentageChargeDetails < Struct.new(
# The inclusive start time.
# @return [Integer]
#
+ # @!attribute [rw] account_id
+ # The Amazon Web Services account to which this custom line item
+ # will be applied.
+ # @return [String]
+ #
# @see http://docs.aws.amazon.com/goto/WebAPI/billingconductor-2021-07-30/CustomLineItemVersionListElement AWS API Documentation
#
class CustomLineItemVersionListElement < Struct.new(
@@ -949,7 +966,8 @@ class CustomLineItemVersionListElement < Struct.new(
:start_billing_period,
:end_billing_period,
:arn,
- :start_time)
+ :start_time,
+ :account_id)
SENSITIVE = [:name, :description]
include Aws::Structure
end
@@ -1621,12 +1639,18 @@ class ListCustomLineItemVersionsOutput < Struct.new(
# A list of custom line item ARNs to retrieve information.
# @return [Array]
#
+ # @!attribute [rw] account_ids
+ # The Amazon Web Services accounts to which this custom line item
+ # will be applied.
+ # @return [Array<String>]
+ #
# @see http://docs.aws.amazon.com/goto/WebAPI/billingconductor-2021-07-30/ListCustomLineItemsFilter AWS API Documentation
#
class ListCustomLineItemsFilter < Struct.new(
:names,
:billing_groups,
- :arns)
+ :arns,
+ :account_ids)
SENSITIVE = []
include Aws::Structure
end
diff --git a/gems/aws-sdk-braket/CHANGELOG.md b/gems/aws-sdk-braket/CHANGELOG.md
index 421520e220b..7268f1cb2cd 100644
--- a/gems/aws-sdk-braket/CHANGELOG.md
+++ b/gems/aws-sdk-braket/CHANGELOG.md
@@ -1,6 +1,11 @@
Unreleased Changes
------------------
+1.32.0 (2023-12-04)
+------------------
+
+* Feature - This release enhances service support to create quantum tasks and hybrid jobs associated with Braket Direct Reservations.
+
1.31.0 (2023-11-28)
------------------
diff --git a/gems/aws-sdk-braket/VERSION b/gems/aws-sdk-braket/VERSION
index 34aae156b19..359c41089a4 100644
--- a/gems/aws-sdk-braket/VERSION
+++ b/gems/aws-sdk-braket/VERSION
@@ -1 +1 @@
-1.31.0
+1.32.0
diff --git a/gems/aws-sdk-braket/lib/aws-sdk-braket.rb b/gems/aws-sdk-braket/lib/aws-sdk-braket.rb
index 1ea5b9bdb60..01bfc8dba0e 100644
--- a/gems/aws-sdk-braket/lib/aws-sdk-braket.rb
+++ b/gems/aws-sdk-braket/lib/aws-sdk-braket.rb
@@ -52,6 +52,6 @@
# @!group service
module Aws::Braket
- GEM_VERSION = '1.31.0'
+ GEM_VERSION = '1.32.0'
end
diff --git a/gems/aws-sdk-braket/lib/aws-sdk-braket/client.rb b/gems/aws-sdk-braket/lib/aws-sdk-braket/client.rb
index 9645fc53dd5..814dae7af21 100644
--- a/gems/aws-sdk-braket/lib/aws-sdk-braket/client.rb
+++ b/gems/aws-sdk-braket/lib/aws-sdk-braket/client.rb
@@ -462,6 +462,9 @@ def cancel_quantum_task(params = {}, options = {})
# container image the job uses and information about the Python scripts
# used for entry and training.
#
+ # @option params [Array<Types::Association>] :associations
+ # The list of Amazon Braket resources associated with the hybrid job.
+ #
# @option params [Types::JobCheckpointConfig] :checkpoint_config
# Information about the output locations for job checkpoint data.
#
@@ -527,6 +530,12 @@ def cancel_quantum_task(params = {}, options = {})
# s3_uri: "S3Path", # required
# },
# },
+ # associations: [
+ # {
+ # arn: "BraketResourceArn", # required
+ # type: "RESERVATION_TIME_WINDOW_ARN", # required, accepts RESERVATION_TIME_WINDOW_ARN
+ # },
+ # ],
# checkpoint_config: {
# local_path: "String4096",
# s3_uri: "S3Path", # required
@@ -590,6 +599,9 @@ def create_job(params = {}, options = {})
# when the required value (Hash, Array, etc.) is provided according to
# the description.**
#
+ # @option params [Array<Types::Association>] :associations
+ # The list of Amazon Braket resources associated with the quantum task.
+ #
# @option params [required, String] :client_token
# The client token associated with the request.
#
@@ -631,6 +643,12 @@ def create_job(params = {}, options = {})
#
# resp = client.create_quantum_task({
# action: "JsonValue", # required
+ # associations: [
+ # {
+ # arn: "BraketResourceArn", # required
+ # type: "RESERVATION_TIME_WINDOW_ARN", # required, accepts RESERVATION_TIME_WINDOW_ARN
+ # },
+ # ],
# client_token: "String64", # required
# device_arn: "DeviceArn", # required
# device_parameters: "CreateQuantumTaskRequestDeviceParametersString",
@@ -722,6 +740,7 @@ def get_device(params = {}, options = {})
# @return [Types::GetJobResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
# * {Types::GetJobResponse#algorithm_specification #algorithm_specification} => Types::AlgorithmSpecification
+ # * {Types::GetJobResponse#associations #associations} => Array<Types::Association>
# * {Types::GetJobResponse#billable_duration #billable_duration} => Integer
# * {Types::GetJobResponse#checkpoint_config #checkpoint_config} => Types::JobCheckpointConfig
# * {Types::GetJobResponse#created_at #created_at} => Time
@@ -755,6 +774,9 @@ def get_device(params = {}, options = {})
# resp.algorithm_specification.script_mode_config.compression_type #=> String, one of "NONE", "GZIP"
# resp.algorithm_specification.script_mode_config.entry_point #=> String
# resp.algorithm_specification.script_mode_config.s3_uri #=> String
+ # resp.associations #=> Array
+ # resp.associations[0].arn #=> String
+ # resp.associations[0].type #=> String, one of "RESERVATION_TIME_WINDOW_ARN"
# resp.billable_duration #=> Integer
# resp.checkpoint_config.local_path #=> String
# resp.checkpoint_config.s3_uri #=> String
@@ -804,10 +826,11 @@ def get_job(params = {}, options = {})
# A list of attributes to return information for.
#
# @option params [required, String] :quantum_task_arn
- # the ARN of the task to retrieve.
+ # The ARN of the task to retrieve.
#
# @return [Types::GetQuantumTaskResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
#
+ # * {Types::GetQuantumTaskResponse#associations #associations} => Array<Types::Association>
# * {Types::GetQuantumTaskResponse#created_at #created_at} => Time
# * {Types::GetQuantumTaskResponse#device_arn #device_arn} => String
# * {Types::GetQuantumTaskResponse#device_parameters #device_parameters} => String
@@ -831,6 +854,9 @@ def get_job(params = {}, options = {})
#
# @example Response structure
#
+ # resp.associations #=> Array
+ # resp.associations[0].arn #=> String
+ # resp.associations[0].type #=> String, one of "RESERVATION_TIME_WINDOW_ARN"
# resp.created_at #=> Time
# resp.device_arn #=> String
# resp.device_parameters #=> String
@@ -1124,7 +1150,7 @@ def build_request(operation_name, params = {})
params: params,
config: config)
context[:gem_name] = 'aws-sdk-braket'
- context[:gem_version] = '1.31.0'
+ context[:gem_version] = '1.32.0'
Seahorse::Client::Request.new(handlers, context)
end
diff --git a/gems/aws-sdk-braket/lib/aws-sdk-braket/client_api.rb b/gems/aws-sdk-braket/lib/aws-sdk-braket/client_api.rb
index a7bcbc779bc..5a9a7b60b37 100644
--- a/gems/aws-sdk-braket/lib/aws-sdk-braket/client_api.rb
+++ b/gems/aws-sdk-braket/lib/aws-sdk-braket/client_api.rb
@@ -15,6 +15,10 @@ module ClientApi
AccessDeniedException = Shapes::StructureShape.new(name: 'AccessDeniedException')
AlgorithmSpecification = Shapes::StructureShape.new(name: 'AlgorithmSpecification')
+ Association = Shapes::StructureShape.new(name: 'Association')
+ AssociationType = Shapes::StringShape.new(name: 'AssociationType')
+ Associations = Shapes::ListShape.new(name: 'Associations')
+ BraketResourceArn = Shapes::StringShape.new(name: 'BraketResourceArn')
CancelJobRequest = Shapes::StructureShape.new(name: 'CancelJobRequest')
CancelJobResponse = Shapes::StructureShape.new(name: 'CancelJobResponse')
CancelQuantumTaskRequest = Shapes::StructureShape.new(name: 'CancelQuantumTaskRequest')
@@ -24,10 +28,12 @@ module ClientApi
ConflictException = Shapes::StructureShape.new(name: 'ConflictException')
ContainerImage = Shapes::StructureShape.new(name: 'ContainerImage')
CreateJobRequest = Shapes::StructureShape.new(name: 'CreateJobRequest')
+ CreateJobRequestAssociationsList = Shapes::ListShape.new(name: 'CreateJobRequestAssociationsList')
CreateJobRequestInputDataConfigList = Shapes::ListShape.new(name: 'CreateJobRequestInputDataConfigList')
CreateJobRequestJobNameString = Shapes::StringShape.new(name: 'CreateJobRequestJobNameString')
CreateJobResponse = Shapes::StructureShape.new(name: 'CreateJobResponse')
CreateQuantumTaskRequest = Shapes::StructureShape.new(name: 'CreateQuantumTaskRequest')
+ CreateQuantumTaskRequestAssociationsList = Shapes::ListShape.new(name: 'CreateQuantumTaskRequestAssociationsList')
CreateQuantumTaskRequestDeviceParametersString = Shapes::StringShape.new(name: 'CreateQuantumTaskRequestDeviceParametersString')
CreateQuantumTaskRequestOutputS3BucketString = Shapes::StringShape.new(name: 'CreateQuantumTaskRequestOutputS3BucketString')
CreateQuantumTaskRequestOutputS3KeyPrefixString = Shapes::StringShape.new(name: 'CreateQuantumTaskRequestOutputS3KeyPrefixString')
@@ -142,6 +148,12 @@ module ClientApi
AlgorithmSpecification.add_member(:script_mode_config, Shapes::ShapeRef.new(shape: ScriptModeConfig, location_name: "scriptModeConfig"))
AlgorithmSpecification.struct_class = Types::AlgorithmSpecification
+ Association.add_member(:arn, Shapes::ShapeRef.new(shape: BraketResourceArn, required: true, location_name: "arn"))
+ Association.add_member(:type, Shapes::ShapeRef.new(shape: AssociationType, required: true, location_name: "type"))
+ Association.struct_class = Types::Association
+
+ Associations.member = Shapes::ShapeRef.new(shape: Association)
+
CancelJobRequest.add_member(:job_arn, Shapes::ShapeRef.new(shape: JobArn, required: true, location: "uri", location_name: "jobArn"))
CancelJobRequest.struct_class = Types::CancelJobRequest
@@ -164,6 +176,7 @@ module ClientApi
ContainerImage.struct_class = Types::ContainerImage
CreateJobRequest.add_member(:algorithm_specification, Shapes::ShapeRef.new(shape: AlgorithmSpecification, required: true, location_name: "algorithmSpecification"))
+ CreateJobRequest.add_member(:associations, Shapes::ShapeRef.new(shape: CreateJobRequestAssociationsList, location_name: "associations"))
CreateJobRequest.add_member(:checkpoint_config, Shapes::ShapeRef.new(shape: JobCheckpointConfig, location_name: "checkpointConfig"))
CreateJobRequest.add_member(:client_token, Shapes::ShapeRef.new(shape: String64, required: true, location_name: "clientToken", metadata: {"idempotencyToken"=>true}))
CreateJobRequest.add_member(:device_config, Shapes::ShapeRef.new(shape: DeviceConfig, required: true, location_name: "deviceConfig"))
@@ -177,12 +190,15 @@ module ClientApi
CreateJobRequest.add_member(:tags, Shapes::ShapeRef.new(shape: TagsMap, location_name: "tags"))
CreateJobRequest.struct_class = Types::CreateJobRequest
+ CreateJobRequestAssociationsList.member = Shapes::ShapeRef.new(shape: Association)
+
CreateJobRequestInputDataConfigList.member = Shapes::ShapeRef.new(shape: InputFileConfig)
CreateJobResponse.add_member(:job_arn, Shapes::ShapeRef.new(shape: JobArn, required: true, location_name: "jobArn"))
CreateJobResponse.struct_class = Types::CreateJobResponse
CreateQuantumTaskRequest.add_member(:action, Shapes::ShapeRef.new(shape: JsonValue, required: true, location_name: "action", metadata: {"jsonvalue"=>true}))
+ CreateQuantumTaskRequest.add_member(:associations, Shapes::ShapeRef.new(shape: CreateQuantumTaskRequestAssociationsList, location_name: "associations"))
CreateQuantumTaskRequest.add_member(:client_token, Shapes::ShapeRef.new(shape: String64, required: true, location_name: "clientToken", metadata: {"idempotencyToken"=>true}))
CreateQuantumTaskRequest.add_member(:device_arn, Shapes::ShapeRef.new(shape: DeviceArn, required: true, location_name: "deviceArn"))
CreateQuantumTaskRequest.add_member(:device_parameters, Shapes::ShapeRef.new(shape: CreateQuantumTaskRequestDeviceParametersString, location_name: "deviceParameters", metadata: {"jsonvalue"=>true}))
@@ -193,6 +209,8 @@ module ClientApi
CreateQuantumTaskRequest.add_member(:tags, Shapes::ShapeRef.new(shape: TagsMap, location_name: "tags"))
CreateQuantumTaskRequest.struct_class = Types::CreateQuantumTaskRequest
+ CreateQuantumTaskRequestAssociationsList.member = Shapes::ShapeRef.new(shape: Association)
+
CreateQuantumTaskResponse.add_member(:quantum_task_arn, Shapes::ShapeRef.new(shape: QuantumTaskArn, required: true, location_name: "quantumTaskArn"))
CreateQuantumTaskResponse.struct_class = Types::CreateQuantumTaskResponse
@@ -241,6 +259,7 @@ module ClientApi
GetJobRequest.struct_class = Types::GetJobRequest
GetJobResponse.add_member(:algorithm_specification, Shapes::ShapeRef.new(shape: AlgorithmSpecification, required: true, location_name: "algorithmSpecification"))
+ GetJobResponse.add_member(:associations, Shapes::ShapeRef.new(shape: Associations, location_name: "associations"))
GetJobResponse.add_member(:billable_duration, Shapes::ShapeRef.new(shape: Integer, location_name: "billableDuration"))
GetJobResponse.add_member(:checkpoint_config, Shapes::ShapeRef.new(shape: JobCheckpointConfig, location_name: "checkpointConfig"))
GetJobResponse.add_member(:created_at, Shapes::ShapeRef.new(shape: SyntheticTimestamp_date_time, required: true, location_name: "createdAt"))
@@ -266,6 +285,7 @@ module ClientApi
GetQuantumTaskRequest.add_member(:quantum_task_arn, Shapes::ShapeRef.new(shape: QuantumTaskArn, required: true, location: "uri", location_name: "quantumTaskArn"))
GetQuantumTaskRequest.struct_class = Types::GetQuantumTaskRequest
+ GetQuantumTaskResponse.add_member(:associations, Shapes::ShapeRef.new(shape: Associations, location_name: "associations"))
GetQuantumTaskResponse.add_member(:created_at, Shapes::ShapeRef.new(shape: SyntheticTimestamp_date_time, required: true, location_name: "createdAt"))
GetQuantumTaskResponse.add_member(:device_arn, Shapes::ShapeRef.new(shape: DeviceArn, required: true, location_name: "deviceArn"))
GetQuantumTaskResponse.add_member(:device_parameters, Shapes::ShapeRef.new(shape: JsonValue, required: true, location_name: "deviceParameters", metadata: {"jsonvalue"=>true}))
diff --git a/gems/aws-sdk-braket/lib/aws-sdk-braket/endpoint_provider.rb b/gems/aws-sdk-braket/lib/aws-sdk-braket/endpoint_provider.rb
index 72825a0e8dd..6109cc893ed 100644
--- a/gems/aws-sdk-braket/lib/aws-sdk-braket/endpoint_provider.rb
+++ b/gems/aws-sdk-braket/lib/aws-sdk-braket/endpoint_provider.rb
@@ -32,7 +32,7 @@ def resolve_endpoint(parameters)
raise ArgumentError, "FIPS and DualStack are enabled, but this partition does not support one or both"
end
if Aws::Endpoints::Matchers.boolean_equals?(use_fips, true)
- if Aws::Endpoints::Matchers.boolean_equals?(true, Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS"))
+ if Aws::Endpoints::Matchers.boolean_equals?(Aws::Endpoints::Matchers.attr(partition_result, "supportsFIPS"), true)
return Aws::Endpoints::Endpoint.new(url: "https://braket-fips.#{region}.#{partition_result['dnsSuffix']}", headers: {}, properties: {})
end
raise ArgumentError, "FIPS is enabled but this partition does not support FIPS"
diff --git a/gems/aws-sdk-braket/lib/aws-sdk-braket/types.rb b/gems/aws-sdk-braket/lib/aws-sdk-braket/types.rb
index 4eee257eb52..b0e7ac08e1c 100644
--- a/gems/aws-sdk-braket/lib/aws-sdk-braket/types.rb
+++ b/gems/aws-sdk-braket/lib/aws-sdk-braket/types.rb
@@ -45,6 +45,25 @@ class AlgorithmSpecification < Struct.new(
include Aws::Structure
end
+ # The Amazon Braket resource and the association type.
+ #
+ # @!attribute [rw] arn
+ # The Amazon Braket resource arn.
+ # @return [String]
+ #
+ # @!attribute [rw] type
+ # The association type for the specified Amazon Braket resource arn.
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/braket-2019-09-01/Association AWS API Documentation
+ #
+ class Association < Struct.new(
+ :arn,
+ :type)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
# @!attribute [rw] job_arn
# The ARN of the Amazon Braket job to cancel.
# @return [String]
@@ -144,6 +163,10 @@ class ContainerImage < Struct.new(
# scripts used for entry and training.
# @return [Types::AlgorithmSpecification]
#
+ # @!attribute [rw] associations
+ # The list of Amazon Braket resources associated with the hybrid job.
+ # @return [Array<Types::Association>]
+ #
# @!attribute [rw] checkpoint_config
# Information about the output locations for job checkpoint data.
# @return [Types::JobCheckpointConfig]
@@ -207,6 +230,7 @@ class ContainerImage < Struct.new(
#
class CreateJobRequest < Struct.new(
:algorithm_specification,
+ :associations,
:checkpoint_config,
:client_token,
:device_config,
@@ -238,6 +262,11 @@ class CreateJobResponse < Struct.new(
# The action associated with the task.
# @return [String]
#
+ # @!attribute [rw] associations
+ # The list of Amazon Braket resources associated with the quantum
+ # task.
+ # @return [Array<Types::Association>]
+ #
# @!attribute [rw] client_token
# The client token associated with the request.
#
@@ -279,6 +308,7 @@ class CreateJobResponse < Struct.new(
#
class CreateQuantumTaskRequest < Struct.new(
:action,
+ :associations,
:client_token,
:device_arn,
:device_parameters,
@@ -498,6 +528,10 @@ class GetJobRequest < Struct.new(
# the job.
# @return [Types::AlgorithmSpecification]
#
+ # @!attribute [rw] associations
+ # The list of Amazon Braket resources associated with the hybrid job.
+ # @return [Array<Types::Association>]
+ #
# @!attribute [rw] billable_duration
# The billable time the Amazon Braket job used to complete.
# @return [Integer]
@@ -593,6 +627,7 @@ class GetJobRequest < Struct.new(
#
class GetJobResponse < Struct.new(
:algorithm_specification,
+ :associations,
:billable_duration,
:checkpoint_config,
:created_at,
@@ -621,7 +656,7 @@ class GetJobResponse < Struct.new(
# @return [Array]
#
# @!attribute [rw] quantum_task_arn
- # the ARN of the task to retrieve.
+ # The ARN of the task to retrieve.
# @return [String]
#
# @see http://docs.aws.amazon.com/goto/WebAPI/braket-2019-09-01/GetQuantumTaskRequest AWS API Documentation
@@ -633,6 +668,11 @@ class GetQuantumTaskRequest < Struct.new(
include Aws::Structure
end
+ # @!attribute [rw] associations
+ # The list of Amazon Braket resources associated with the quantum
+ # task.
+ # @return [Array<Types::Association>]
+ #
# @!attribute [rw] created_at
# The time at which the task was created.
# @return [Time]
@@ -690,6 +730,7 @@ class GetQuantumTaskRequest < Struct.new(
# @see http://docs.aws.amazon.com/goto/WebAPI/braket-2019-09-01/GetQuantumTaskResponse AWS API Documentation
#
class GetQuantumTaskResponse < Struct.new(
+ :associations,
:created_at,
:device_arn,
:device_parameters,
@@ -834,7 +875,7 @@ class JobCheckpointConfig < Struct.new(
# @return [String]
#
# @!attribute [rw] time_of_event
- # TThe type of event that occurred related to the Amazon Braket job.
+ # The time of the event that occurred related to the Amazon Braket job.
# @return [Time]
#
# @see http://docs.aws.amazon.com/goto/WebAPI/braket-2019-09-01/JobEventDetails AWS API Documentation
diff --git a/gems/aws-sdk-cloud9/CHANGELOG.md b/gems/aws-sdk-cloud9/CHANGELOG.md
index f5e50079d93..97af0c7b284 100644
--- a/gems/aws-sdk-cloud9/CHANGELOG.md
+++ b/gems/aws-sdk-cloud9/CHANGELOG.md
@@ -1,6 +1,11 @@
Unreleased Changes
------------------
+1.64.0 (2023-12-04)
+------------------
+
+* Feature - This release adds the requirement to include the imageId parameter in the CreateEnvironmentEC2 API call.
+
1.63.0 (2023-11-28)
------------------
diff --git a/gems/aws-sdk-cloud9/VERSION b/gems/aws-sdk-cloud9/VERSION
index af92bdd9f58..9405730420f 100644
--- a/gems/aws-sdk-cloud9/VERSION
+++ b/gems/aws-sdk-cloud9/VERSION
@@ -1 +1 @@
-1.63.0
+1.64.0
diff --git a/gems/aws-sdk-cloud9/lib/aws-sdk-cloud9.rb b/gems/aws-sdk-cloud9/lib/aws-sdk-cloud9.rb
index 2d791265ee5..b114128becc 100644
--- a/gems/aws-sdk-cloud9/lib/aws-sdk-cloud9.rb
+++ b/gems/aws-sdk-cloud9/lib/aws-sdk-cloud9.rb
@@ -52,6 +52,6 @@
# @!group service
module Aws::Cloud9
- GEM_VERSION = '1.63.0'
+ GEM_VERSION = '1.64.0'
end
diff --git a/gems/aws-sdk-cloud9/lib/aws-sdk-cloud9/client.rb b/gems/aws-sdk-cloud9/lib/aws-sdk-cloud9/client.rb
index 175f8891f7b..6dd4f0ac9e3 100644
--- a/gems/aws-sdk-cloud9/lib/aws-sdk-cloud9/client.rb
+++ b/gems/aws-sdk-cloud9/lib/aws-sdk-cloud9/client.rb
@@ -430,7 +430,7 @@ def initialize(*args)
# The ID of the subnet in Amazon VPC that Cloud9 will use to communicate
# with the Amazon EC2 instance.
#
- # @option params [String] :image_id
+ # @option params [required, String] :image_id
# The identifier for the Amazon Machine Image (AMI) that's used to
# create the EC2 instance. To choose an AMI for the instance, you must
# specify a valid AMI alias or a valid Amazon EC2 Systems Manager (SSM)
@@ -456,7 +456,7 @@ def initialize(*args)
#
# AMI aliases
#
- # * Amazon Linux (default): `amazonlinux-1-x86_64`
+ # * Amazon Linux: `amazonlinux-1-x86_64`
#
# * Amazon Linux 2: `amazonlinux-2-x86_64`
#
@@ -466,7 +466,7 @@ def initialize(*args)
#
# **SSM paths**
#
- # * Amazon Linux (default):
+ # * Amazon Linux:
# `resolve:ssm:/aws/service/cloud9/amis/amazonlinux-1-x86_64`
#
# * Amazon Linux 2:
@@ -539,7 +539,7 @@ def initialize(*args)
# client_request_token: "ClientRequestToken",
# instance_type: "InstanceType", # required
# subnet_id: "SubnetId",
- # image_id: "ImageId",
+ # image_id: "ImageId", # required
# automatic_stop_time_minutes: 1,
# owner_arn: "UserArn",
# tags: [
@@ -1266,7 +1266,7 @@ def build_request(operation_name, params = {})
params: params,
config: config)
context[:gem_name] = 'aws-sdk-cloud9'
- context[:gem_version] = '1.63.0'
+ context[:gem_version] = '1.64.0'
Seahorse::Client::Request.new(handlers, context)
end
diff --git a/gems/aws-sdk-cloud9/lib/aws-sdk-cloud9/client_api.rb b/gems/aws-sdk-cloud9/lib/aws-sdk-cloud9/client_api.rb
index c1f95bfe8c1..3f26a719be6 100644
--- a/gems/aws-sdk-cloud9/lib/aws-sdk-cloud9/client_api.rb
+++ b/gems/aws-sdk-cloud9/lib/aws-sdk-cloud9/client_api.rb
@@ -96,7 +96,7 @@ module ClientApi
CreateEnvironmentEC2Request.add_member(:client_request_token, Shapes::ShapeRef.new(shape: ClientRequestToken, location_name: "clientRequestToken"))
CreateEnvironmentEC2Request.add_member(:instance_type, Shapes::ShapeRef.new(shape: InstanceType, required: true, location_name: "instanceType"))
CreateEnvironmentEC2Request.add_member(:subnet_id, Shapes::ShapeRef.new(shape: SubnetId, location_name: "subnetId"))
- CreateEnvironmentEC2Request.add_member(:image_id, Shapes::ShapeRef.new(shape: ImageId, location_name: "imageId"))
+ CreateEnvironmentEC2Request.add_member(:image_id, Shapes::ShapeRef.new(shape: ImageId, required: true, location_name: "imageId"))
CreateEnvironmentEC2Request.add_member(:automatic_stop_time_minutes, Shapes::ShapeRef.new(shape: AutomaticStopTimeMinutes, location_name: "automaticStopTimeMinutes"))
CreateEnvironmentEC2Request.add_member(:owner_arn, Shapes::ShapeRef.new(shape: UserArn, location_name: "ownerArn"))
CreateEnvironmentEC2Request.add_member(:tags, Shapes::ShapeRef.new(shape: TagList, location_name: "tags"))
diff --git a/gems/aws-sdk-cloud9/lib/aws-sdk-cloud9/types.rb b/gems/aws-sdk-cloud9/lib/aws-sdk-cloud9/types.rb
index d4ba338c067..7c9531283dd 100644
--- a/gems/aws-sdk-cloud9/lib/aws-sdk-cloud9/types.rb
+++ b/gems/aws-sdk-cloud9/lib/aws-sdk-cloud9/types.rb
@@ -87,7 +87,7 @@ class ConflictException < Aws::EmptyStructure; end
#
# AMI aliases
#
- # * Amazon Linux (default): `amazonlinux-1-x86_64`
+ # * Amazon Linux: `amazonlinux-1-x86_64`
#
# * Amazon Linux 2: `amazonlinux-2-x86_64`
#
@@ -97,7 +97,7 @@ class ConflictException < Aws::EmptyStructure; end
#
# **SSM paths**
#
- # * Amazon Linux (default):
+ # * Amazon Linux:
# `resolve:ssm:/aws/service/cloud9/amis/amazonlinux-1-x86_64`
#
# * Amazon Linux 2:
diff --git a/gems/aws-sdk-cloudformation/CHANGELOG.md b/gems/aws-sdk-cloudformation/CHANGELOG.md
index a3027b968f1..ef29dad158d 100644
--- a/gems/aws-sdk-cloudformation/CHANGELOG.md
+++ b/gems/aws-sdk-cloudformation/CHANGELOG.md
@@ -1,6 +1,11 @@
Unreleased Changes
------------------
+1.96.0 (2023-12-04)
+------------------
+
+* Feature - Including UPDATE_* states as a success status for CreateStack waiter.
+
1.95.0 (2023-11-28)
------------------
diff --git a/gems/aws-sdk-cloudformation/VERSION b/gems/aws-sdk-cloudformation/VERSION
index 55f6ae93382..9141007a558 100644
--- a/gems/aws-sdk-cloudformation/VERSION
+++ b/gems/aws-sdk-cloudformation/VERSION
@@ -1 +1 @@
-1.95.0
+1.96.0
diff --git a/gems/aws-sdk-cloudformation/lib/aws-sdk-cloudformation.rb b/gems/aws-sdk-cloudformation/lib/aws-sdk-cloudformation.rb
index d5916ba11bb..17d2a7bbc63 100644
--- a/gems/aws-sdk-cloudformation/lib/aws-sdk-cloudformation.rb
+++ b/gems/aws-sdk-cloudformation/lib/aws-sdk-cloudformation.rb
@@ -57,6 +57,6 @@
# @!group service
module Aws::CloudFormation
- GEM_VERSION = '1.95.0'
+ GEM_VERSION = '1.96.0'
end
diff --git a/gems/aws-sdk-cloudformation/lib/aws-sdk-cloudformation/client.rb b/gems/aws-sdk-cloudformation/lib/aws-sdk-cloudformation/client.rb
index e1c8fc0f50e..a2d6b44282d 100644
--- a/gems/aws-sdk-cloudformation/lib/aws-sdk-cloudformation/client.rb
+++ b/gems/aws-sdk-cloudformation/lib/aws-sdk-cloudformation/client.rb
@@ -7221,7 +7221,7 @@ def build_request(operation_name, params = {})
params: params,
config: config)
context[:gem_name] = 'aws-sdk-cloudformation'
- context[:gem_version] = '1.95.0'
+ context[:gem_version] = '1.96.0'
Seahorse::Client::Request.new(handlers, context)
end
diff --git a/gems/aws-sdk-cloudformation/lib/aws-sdk-cloudformation/waiters.rb b/gems/aws-sdk-cloudformation/lib/aws-sdk-cloudformation/waiters.rb
index bce472b2938..2325e23e33d 100644
--- a/gems/aws-sdk-cloudformation/lib/aws-sdk-cloudformation/waiters.rb
+++ b/gems/aws-sdk-cloudformation/lib/aws-sdk-cloudformation/waiters.rb
@@ -153,6 +153,54 @@ def initialize(options)
"matcher" => "pathAll",
"state" => "success"
},
+ {
+ "argument" => "stacks[].stack_status",
+ "expected" => "UPDATE_COMPLETE",
+ "matcher" => "pathAll",
+ "state" => "success"
+ },
+ {
+ "argument" => "stacks[].stack_status",
+ "expected" => "UPDATE_IN_PROGRESS",
+ "matcher" => "pathAll",
+ "state" => "success"
+ },
+ {
+ "argument" => "stacks[].stack_status",
+ "expected" => "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS",
+ "matcher" => "pathAll",
+ "state" => "success"
+ },
+ {
+ "argument" => "stacks[].stack_status",
+ "expected" => "UPDATE_FAILED",
+ "matcher" => "pathAll",
+ "state" => "success"
+ },
+ {
+ "argument" => "stacks[].stack_status",
+ "expected" => "UPDATE_ROLLBACK_IN_PROGRESS",
+ "matcher" => "pathAll",
+ "state" => "success"
+ },
+ {
+ "argument" => "stacks[].stack_status",
+ "expected" => "UPDATE_ROLLBACK_FAILED",
+ "matcher" => "pathAll",
+ "state" => "success"
+ },
+ {
+ "argument" => "stacks[].stack_status",
+ "expected" => "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS",
+ "matcher" => "pathAll",
+ "state" => "success"
+ },
+ {
+ "argument" => "stacks[].stack_status",
+ "expected" => "UPDATE_ROLLBACK_COMPLETE",
+ "matcher" => "pathAll",
+ "state" => "success"
+ },
{
"argument" => "stacks[].stack_status",
"expected" => "CREATE_FAILED",
diff --git a/gems/aws-sdk-finspace/CHANGELOG.md b/gems/aws-sdk-finspace/CHANGELOG.md
index 41ccd64ca6a..bafed4bc4a8 100644
--- a/gems/aws-sdk-finspace/CHANGELOG.md
+++ b/gems/aws-sdk-finspace/CHANGELOG.md
@@ -1,6 +1,11 @@
Unreleased Changes
------------------
+1.26.0 (2023-12-04)
+------------------
+
+* Feature - Release General Purpose type clusters
+
1.25.0 (2023-11-28)
------------------
diff --git a/gems/aws-sdk-finspace/VERSION b/gems/aws-sdk-finspace/VERSION
index ad2191947f7..5ff8c4f5d2a 100644
--- a/gems/aws-sdk-finspace/VERSION
+++ b/gems/aws-sdk-finspace/VERSION
@@ -1 +1 @@
-1.25.0
+1.26.0
diff --git a/gems/aws-sdk-finspace/lib/aws-sdk-finspace.rb b/gems/aws-sdk-finspace/lib/aws-sdk-finspace.rb
index c13fcc6911d..3dc1e4eef21 100644
--- a/gems/aws-sdk-finspace/lib/aws-sdk-finspace.rb
+++ b/gems/aws-sdk-finspace/lib/aws-sdk-finspace.rb
@@ -52,6 +52,6 @@
# @!group service
module Aws::Finspace
- GEM_VERSION = '1.25.0'
+ GEM_VERSION = '1.26.0'
end
diff --git a/gems/aws-sdk-finspace/lib/aws-sdk-finspace/client.rb b/gems/aws-sdk-finspace/lib/aws-sdk-finspace/client.rb
index 86492e7866d..75c9cf3b073 100644
--- a/gems/aws-sdk-finspace/lib/aws-sdk-finspace/client.rb
+++ b/gems/aws-sdk-finspace/lib/aws-sdk-finspace/client.rb
@@ -490,30 +490,51 @@ def create_environment(params = {}, options = {})
#
# @option params [required, Array] :change_requests
# A list of change request objects that are run in order. A change
- # request object consists of changeType , s3Path, and a dbPath. A
+ # request object consists of `changeType` , `s3Path`, and `dbPath`. A
# changeType can has the following values:
#
# * PUT – Adds or updates files in a database.
#
# * DELETE – Deletes files in a database.
#
- # All the change requests require a mandatory *dbPath* attribute that
- # defines the path within the database directory. The *s3Path* attribute
- # defines the s3 source file path and is required for a PUT change type.
+ # All the change requests require a mandatory `dbPath` attribute that
+ # defines the path within the database directory. All database paths
+ # must start with a leading / and end with a trailing /. The `s3Path`
+ # attribute defines the s3 source file path and is required for a PUT
+ # change type. The `s3Path` must end with a trailing / if it is a
+ # directory and must end without a trailing / if it is a file.
#
- # Here is an example of how you can use the change request object:
+ # Here are a few examples of how you can use the change request object:
#
- # `[ \{ "changeType": "PUT", "s3Path":"s3://bucket/db/2020.01.02/",
- # "dbPath":"/2020.01.02/"\}, \{ "changeType": "PUT",
- # "s3Path":"s3://bucket/db/sym", "dbPath":"/"\}, \{ "changeType":
- # "DELETE", "dbPath": "/2020.01.01/"\} ]`
+ # 1. This request adds a single sym file at database root location.
#
- # In this example, the first request with *PUT* change type allows you
- # to add files in the given s3Path under the *2020.01.02* partition of
- # the database. The second request with *PUT* change type allows you to
- # add a single sym file at database root location. The last request with
- # *DELETE* change type allows you to delete the files under the
- # *2020.01.01* partition of the database.
+ # `\{ "changeType": "PUT", "s3Path":"s3://bucket/db/sym",
+ # "dbPath":"/"\}`
+ #
+ # 2. This request adds files in the given `s3Path` under the 2020.01.02
+ # partition of the database.
+ #
+ # `\{ "changeType": "PUT", "s3Path":"s3://bucket/db/2020.01.02/",
+ # "dbPath":"/2020.01.02/"\}`
+ #
+ # 3. This request adds files in the given `s3Path` under the *taq*
+ # table partition of the database.
+ #
+ # `[ \{ "changeType": "PUT",
+ # "s3Path":"s3://bucket/db/2020.01.02/taq/",
+ # "dbPath":"/2020.01.02/taq/"\}]`
+ #
+ # 4. This request deletes the 2020.01.02 partition of the database.
+ #
+ # `[\{ "changeType": "DELETE", "dbPath": "/2020.01.02/"\} ]`
+ #
+ # 5. The *DELETE* request allows you to delete the existing files under
+ # the 2020.01.02 partition of the database, and the *PUT* request
+ # adds a new taq table under it.
+ #
+ # `[ \{"changeType": "DELETE", "dbPath":"/2020.01.02/"\},
+ # \{"changeType": "PUT", "s3Path":"s3://bucket/db/2020.01.02/taq/",
+ # "dbPath":"/2020.01.02/taq/"\}]`
#
# @option params [required, String] :client_token
# A token that ensures idempotency. This token expires in 10 minutes.
@@ -606,6 +627,13 @@ def create_kx_changeset(params = {}, options = {})
# logic using the initialization scripts and custom code. This type of
# cluster does not require a writable local storage.
#
+ # * GP – A general purpose cluster allows you to quickly iterate on code
+ # during development by granting greater access to system commands and
+ # enabling a fast reload of custom code. This cluster type can
+ # optionally mount databases including cache and savedown storage. For
+ # this cluster type, the node count is fixed at 1. It does not support
+ # autoscaling and supports only `SINGLE` AZ mode.
+ #
# @option params [Array] :databases
# A list of databases that will be available for querying.
#
@@ -703,7 +731,7 @@ def create_kx_changeset(params = {}, options = {})
# client_token: "ClientToken",
# environment_id: "KxEnvironmentId", # required
# cluster_name: "KxClusterName", # required
- # cluster_type: "HDB", # required, accepts HDB, RDB, GATEWAY
+ # cluster_type: "HDB", # required, accepts HDB, RDB, GATEWAY, GP
# databases: [
# {
# database_name: "DatabaseName", # required
@@ -772,7 +800,7 @@ def create_kx_changeset(params = {}, options = {})
# resp.status #=> String, one of "PENDING", "CREATING", "CREATE_FAILED", "RUNNING", "UPDATING", "DELETING", "DELETED", "DELETE_FAILED"
# resp.status_reason #=> String
# resp.cluster_name #=> String
- # resp.cluster_type #=> String, one of "HDB", "RDB", "GATEWAY"
+ # resp.cluster_type #=> String, one of "HDB", "RDB", "GATEWAY", "GP"
# resp.databases #=> Array
# resp.databases[0].database_name #=> String
# resp.databases[0].cache_configurations #=> Array
@@ -1282,7 +1310,7 @@ def get_kx_changeset(params = {}, options = {})
# resp.status #=> String, one of "PENDING", "CREATING", "CREATE_FAILED", "RUNNING", "UPDATING", "DELETING", "DELETED", "DELETE_FAILED"
# resp.status_reason #=> String
# resp.cluster_name #=> String
- # resp.cluster_type #=> String, one of "HDB", "RDB", "GATEWAY"
+ # resp.cluster_type #=> String, one of "HDB", "RDB", "GATEWAY", "GP"
# resp.databases #=> Array
# resp.databases[0].database_name #=> String
# resp.databases[0].cache_configurations #=> Array
@@ -1715,6 +1743,13 @@ def list_kx_cluster_nodes(params = {}, options = {})
# logic using the initialization scripts and custom code. This type of
# cluster does not require a writable local storage.
#
+ # * GP – A general purpose cluster allows you to quickly iterate on code
+ # during development by granting greater access to system commands and
+ # enabling a fast reload of custom code. This cluster type can
+ # optionally mount databases including cache and savedown storage. For
+ # this cluster type, the node count is fixed at 1. It does not support
+ # autoscaling and supports only `SINGLE` AZ mode.
+ #
# @option params [Integer] :max_results
# The maximum number of results to return in this request.
#
@@ -1730,7 +1765,7 @@ def list_kx_cluster_nodes(params = {}, options = {})
#
# resp = client.list_kx_clusters({
# environment_id: "KxEnvironmentId", # required
- # cluster_type: "HDB", # accepts HDB, RDB, GATEWAY
+ # cluster_type: "HDB", # accepts HDB, RDB, GATEWAY, GP
# max_results: 1,
# next_token: "PaginationToken",
# })
@@ -1741,7 +1776,7 @@ def list_kx_cluster_nodes(params = {}, options = {})
# resp.kx_cluster_summaries[0].status #=> String, one of "PENDING", "CREATING", "CREATE_FAILED", "RUNNING", "UPDATING", "DELETING", "DELETED", "DELETE_FAILED"
# resp.kx_cluster_summaries[0].status_reason #=> String
# resp.kx_cluster_summaries[0].cluster_name #=> String
- # resp.kx_cluster_summaries[0].cluster_type #=> String, one of "HDB", "RDB", "GATEWAY"
+ # resp.kx_cluster_summaries[0].cluster_type #=> String, one of "HDB", "RDB", "GATEWAY", "GP"
# resp.kx_cluster_summaries[0].cluster_description #=> String
# resp.kx_cluster_summaries[0].release_label #=> String
# resp.kx_cluster_summaries[0].initialization_script #=> String
@@ -2101,10 +2136,14 @@ def update_environment(params = {}, options = {})
# will be loaded on the cluster. It must include the file name itself.
# For example, `somedir/init.q`.
#
+ # You cannot update this parameter for a `NO_RESTART` deployment.
+ #
# @option params [Array] :command_line_arguments
# Specifies the key-value pairs to make them available inside the
# cluster.
#
+ # You cannot update this parameter for a `NO_RESTART` deployment.
+ #
# @option params [Types::KxClusterCodeDeploymentConfiguration] :deployment_configuration
# The configuration that allows you to choose how you want to update the
# code on a cluster.
@@ -2130,7 +2169,7 @@ def update_environment(params = {}, options = {})
# },
# ],
# deployment_configuration: {
- # deployment_strategy: "ROLLING", # required, accepts ROLLING, FORCE
+ # deployment_strategy: "NO_RESTART", # required, accepts NO_RESTART, ROLLING, FORCE
# },
# })
#
@@ -2512,7 +2551,7 @@ def build_request(operation_name, params = {})
params: params,
config: config)
context[:gem_name] = 'aws-sdk-finspace'
- context[:gem_version] = '1.25.0'
+ context[:gem_version] = '1.26.0'
Seahorse::Client::Request.new(handlers, context)
end
diff --git a/gems/aws-sdk-finspace/lib/aws-sdk-finspace/types.rb b/gems/aws-sdk-finspace/lib/aws-sdk-finspace/types.rb
index ca08f534995..51c4597b79d 100644
--- a/gems/aws-sdk-finspace/lib/aws-sdk-finspace/types.rb
+++ b/gems/aws-sdk-finspace/lib/aws-sdk-finspace/types.rb
@@ -292,31 +292,52 @@ class CreateEnvironmentResponse < Struct.new(
#
# @!attribute [rw] change_requests
# A list of change request objects that are run in order. A change
- # request object consists of changeType , s3Path, and a dbPath. A
+ # request object consists of `changeType` , `s3Path`, and `dbPath`. A
# changeType can has the following values:
#
# * PUT – Adds or updates files in a database.
#
# * DELETE – Deletes files in a database.
#
- # All the change requests require a mandatory *dbPath* attribute that
- # defines the path within the database directory. The *s3Path*
+ # All the change requests require a mandatory `dbPath` attribute that
+ # defines the path within the database directory. All database paths
+ # must start with a leading / and end with a trailing /. The `s3Path`
# attribute defines the s3 source file path and is required for a PUT
- # change type.
+ # change type. The `s3Path` must end with a trailing / if it is a
+ # directory and must end without a trailing / if it is a file.
#
- # Here is an example of how you can use the change request object:
+ # Here are a few examples of how you can use the change request object:
#
- # `[ \{ "changeType": "PUT", "s3Path":"s3://bucket/db/2020.01.02/",
- # "dbPath":"/2020.01.02/"\}, \{ "changeType": "PUT",
- # "s3Path":"s3://bucket/db/sym", "dbPath":"/"\}, \{ "changeType":
- # "DELETE", "dbPath": "/2020.01.01/"\} ]`
+ # 1. This request adds a single sym file at database root location.
#
- # In this example, the first request with *PUT* change type allows you
- # to add files in the given s3Path under the *2020.01.02* partition of
- # the database. The second request with *PUT* change type allows you
- # to add a single sym file at database root location. The last request
- # with *DELETE* change type allows you to delete the files under the
- # *2020.01.01* partition of the database.
+ # `\{ "changeType": "PUT", "s3Path":"s3://bucket/db/sym",
+ # "dbPath":"/"\}`
+ #
+ # 2. This request adds files in the given `s3Path` under the
+ # 2020.01.02 partition of the database.
+ #
+ # `\{ "changeType": "PUT", "s3Path":"s3://bucket/db/2020.01.02/",
+ # "dbPath":"/2020.01.02/"\}`
+ #
+ # 3. This request adds files in the given `s3Path` under the *taq*
+ # table partition of the database.
+ #
+ # `[ \{ "changeType": "PUT",
+ # "s3Path":"s3://bucket/db/2020.01.02/taq/",
+ # "dbPath":"/2020.01.02/taq/"\}]`
+ #
+ # 4. This request deletes the 2020.01.02 partition of the database.
+ #
+ # `[\{ "changeType": "DELETE", "dbPath": "/2020.01.02/"\} ]`
+ #
+ # 5. The *DELETE* request allows you to delete the existing files
+ # under the 2020.01.02 partition of the database, and the *PUT*
+ # request adds a new taq table under it.
+ #
+ # `[ \{"changeType": "DELETE", "dbPath":"/2020.01.02/"\},
+ # \{"changeType": "PUT",
+ # "s3Path":"s3://bucket/db/2020.01.02/taq/",
+ # "dbPath":"/2020.01.02/taq/"\}]`
# @return [Array]
#
# @!attribute [rw] client_token
@@ -434,6 +455,13 @@ class CreateKxChangesetResponse < Struct.new(
# processes in kdb systems. It allows you to create your own routing
# logic using the initialization scripts and custom code. This type
# of cluster does not require a writable local storage.
+ #
+ # * GP – A general purpose cluster allows you to quickly iterate on
+ # code during development by granting greater access to system
+ # commands and enabling a fast reload of custom code. This cluster
+ # type can optionally mount databases including cache and savedown
+ # storage. For this cluster type, the node count is fixed at 1. It
+ # does not support autoscaling and supports only `SINGLE` AZ mode.
# @return [String]
#
# @!attribute [rw] databases
@@ -597,6 +625,13 @@ class CreateKxClusterRequest < Struct.new(
# processes in kdb systems. It allows you to create your own routing
# logic using the initialization scripts and custom code. This type
# of cluster does not require a writable local storage.
+ #
+ # * GP – A general purpose cluster allows you to quickly iterate on
+ # code during development by granting greater access to system
+ # commands and enabling a fast reload of custom code. This cluster
+ # type can optionally mount databases including cache and savedown
+ # storage. For this cluster type, the node count is fixed at 1. It
+ # does not support autoscaling and supports only `SINGLE` AZ mode.
# @return [String]
#
# @!attribute [rw] databases
@@ -1392,6 +1427,13 @@ class GetKxClusterRequest < Struct.new(
# processes in kdb systems. It allows you to create your own routing
# logic using the initialization scripts and custom code. This type
# of cluster does not require a writable local storage.
+ #
+ # * GP – A general purpose cluster allows you to quickly iterate on
+ # code during development by granting greater access to system
+ # commands and enabling a fast reload of custom code. This cluster
+ # type can optionally mount databases including cache and savedown
+ # storage. For this cluster type, the node count is fixed at 1. It
+ # does not support autoscaling and supports only `SINGLE` AZ mode.
# @return [String]
#
# @!attribute [rw] databases
@@ -1972,6 +2014,13 @@ class KxChangesetListEntry < Struct.new(
# processes in kdb systems. It allows you to create your own routing
# logic using the initialization scripts and custom code. This type
# of cluster does not require a writable local storage.
+ #
+ # * GP – A general purpose cluster allows you to quickly iterate on
+ # code during development by granting greater access to system
+ # commands and enabling a fast reload of custom code. This cluster
+ # type can optionally mount databases including cache and savedown
+ # storage. For this cluster type, the node count is fixed at 1. It
+ # does not support autoscaling and supports only `SINGLE` AZ mode.
# @return [String]
#
# @!attribute [rw] cluster_description
@@ -2050,6 +2099,14 @@ class KxCluster < Struct.new(
# * ROLLING – This options updates the cluster by stopping the exiting
# q process and starting a new q process with updated configuration.
#
+ # * NO\_RESTART – This option updates the cluster without stopping the
+ # running q process. It is only available for `GP` type clusters.
+ # This option is quicker as it reduces the turnaround time to
+ # update configuration on a cluster.
+ #
+ # With this deployment mode, you cannot update the
+ # `initializationScript` and `commandLineArguments` parameters.
+ #
# * FORCE – This option updates the cluster by immediately stopping
# all the running processes before starting up new ones with the
# updated configuration.
@@ -2574,6 +2631,13 @@ class ListKxClusterNodesResponse < Struct.new(
# processes in kdb systems. It allows you to create your own routing
# logic using the initialization scripts and custom code. This type
# of cluster does not require a writable local storage.
+ #
+ # * GP – A general purpose cluster allows you to quickly iterate on
+ # code during development by granting greater access to system
+ # commands and enabling a fast reload of custom code. This cluster
+ # type can optionally mount databases including cache and savedown
+ # storage. For this cluster type, the node count is fixed at 1. It
+ # does not support autoscaling and supports only `SINGLE` AZ mode.
# @return [String]
#
# @!attribute [rw] max_results
@@ -3042,11 +3106,15 @@ class UpdateEnvironmentResponse < Struct.new(
# a relative path within *.zip* file that contains the custom code,
# which will be loaded on the cluster. It must include the file name
# itself. For example, `somedir/init.q`.
+ #
+ # You cannot update this parameter for a `NO_RESTART` deployment.
# @return [String]
#
# @!attribute [rw] command_line_arguments
# Specifies the key-value pairs to make them available inside the
# cluster.
+ #
+ # You cannot update this parameter for a `NO_RESTART` deployment.
# @return [Array]
#
# @!attribute [rw] deployment_configuration
diff --git a/gems/aws-sdk-medialive/CHANGELOG.md b/gems/aws-sdk-medialive/CHANGELOG.md
index f5acb684206..89df98493af 100644
--- a/gems/aws-sdk-medialive/CHANGELOG.md
+++ b/gems/aws-sdk-medialive/CHANGELOG.md
@@ -1,6 +1,11 @@
Unreleased Changes
------------------
+1.113.0 (2023-12-04)
+------------------
+
+* Feature - Adds support for custom color correction on channels using 3D LUT files.
+
1.112.0 (2023-11-28)
------------------
diff --git a/gems/aws-sdk-medialive/VERSION b/gems/aws-sdk-medialive/VERSION
index 628cac6f078..7bd0f252f61 100644
--- a/gems/aws-sdk-medialive/VERSION
+++ b/gems/aws-sdk-medialive/VERSION
@@ -1 +1 @@
-1.112.0
+1.113.0
diff --git a/gems/aws-sdk-medialive/lib/aws-sdk-medialive.rb b/gems/aws-sdk-medialive/lib/aws-sdk-medialive.rb
index 34e02b16145..526d89a25ca 100644
--- a/gems/aws-sdk-medialive/lib/aws-sdk-medialive.rb
+++ b/gems/aws-sdk-medialive/lib/aws-sdk-medialive.rb
@@ -53,6 +53,6 @@
# @!group service
module Aws::MediaLive
- GEM_VERSION = '1.112.0'
+ GEM_VERSION = '1.113.0'
end
diff --git a/gems/aws-sdk-medialive/lib/aws-sdk-medialive/client.rb b/gems/aws-sdk-medialive/lib/aws-sdk-medialive/client.rb
index 7dee8eb1af8..79d0025c823 100644
--- a/gems/aws-sdk-medialive/lib/aws-sdk-medialive/client.rb
+++ b/gems/aws-sdk-medialive/lib/aws-sdk-medialive/client.rb
@@ -1536,6 +1536,10 @@ def claim_device(params = {}, options = {})
# resp.channel.encoder_settings.video_descriptions[0].sharpness #=> Integer
# resp.channel.encoder_settings.video_descriptions[0].width #=> Integer
# resp.channel.encoder_settings.thumbnail_configuration.state #=> String, one of "AUTO", "DISABLED"
+ # resp.channel.encoder_settings.color_correction_settings.global_color_corrections #=> Array
+ # resp.channel.encoder_settings.color_correction_settings.global_color_corrections[0].input_color_space #=> String, one of "HDR10", "HLG_2020", "REC_601", "REC_709"
+ # resp.channel.encoder_settings.color_correction_settings.global_color_corrections[0].output_color_space #=> String, one of "HDR10", "HLG_2020", "REC_601", "REC_709"
+ # resp.channel.encoder_settings.color_correction_settings.global_color_corrections[0].uri #=> String
# resp.channel.id #=> String
# resp.channel.input_attachments #=> Array
# resp.channel.input_attachments[0].automatic_input_failover_settings.error_clear_time_msec #=> Integer
@@ -2636,6 +2640,10 @@ def create_tags(params = {}, options = {})
# resp.encoder_settings.video_descriptions[0].sharpness #=> Integer
# resp.encoder_settings.video_descriptions[0].width #=> Integer
# resp.encoder_settings.thumbnail_configuration.state #=> String, one of "AUTO", "DISABLED"
+ # resp.encoder_settings.color_correction_settings.global_color_corrections #=> Array
+ # resp.encoder_settings.color_correction_settings.global_color_corrections[0].input_color_space #=> String, one of "HDR10", "HLG_2020", "REC_601", "REC_709"
+ # resp.encoder_settings.color_correction_settings.global_color_corrections[0].output_color_space #=> String, one of "HDR10", "HLG_2020", "REC_601", "REC_709"
+ # resp.encoder_settings.color_correction_settings.global_color_corrections[0].uri #=> String
# resp.id #=> String
# resp.input_attachments #=> Array
# resp.input_attachments[0].automatic_input_failover_settings.error_clear_time_msec #=> Integer
@@ -3628,6 +3636,10 @@ def describe_account_configuration(params = {}, options = {})
# resp.encoder_settings.video_descriptions[0].sharpness #=> Integer
# resp.encoder_settings.video_descriptions[0].width #=> Integer
# resp.encoder_settings.thumbnail_configuration.state #=> String, one of "AUTO", "DISABLED"
+ # resp.encoder_settings.color_correction_settings.global_color_corrections #=> Array
+ # resp.encoder_settings.color_correction_settings.global_color_corrections[0].input_color_space #=> String, one of "HDR10", "HLG_2020", "REC_601", "REC_709"
+ # resp.encoder_settings.color_correction_settings.global_color_corrections[0].output_color_space #=> String, one of "HDR10", "HLG_2020", "REC_601", "REC_709"
+ # resp.encoder_settings.color_correction_settings.global_color_corrections[0].uri #=> String
# resp.id #=> String
# resp.input_attachments #=> Array
# resp.input_attachments[0].automatic_input_failover_settings.error_clear_time_msec #=> Integer
@@ -5750,6 +5762,10 @@ def reject_input_device_transfer(params = {}, options = {})
# resp.encoder_settings.video_descriptions[0].sharpness #=> Integer
# resp.encoder_settings.video_descriptions[0].width #=> Integer
# resp.encoder_settings.thumbnail_configuration.state #=> String, one of "AUTO", "DISABLED"
+ # resp.encoder_settings.color_correction_settings.global_color_corrections #=> Array
+ # resp.encoder_settings.color_correction_settings.global_color_corrections[0].input_color_space #=> String, one of "HDR10", "HLG_2020", "REC_601", "REC_709"
+ # resp.encoder_settings.color_correction_settings.global_color_corrections[0].output_color_space #=> String, one of "HDR10", "HLG_2020", "REC_601", "REC_709"
+ # resp.encoder_settings.color_correction_settings.global_color_corrections[0].uri #=> String
# resp.id #=> String
# resp.input_attachments #=> Array
# resp.input_attachments[0].automatic_input_failover_settings.error_clear_time_msec #=> Integer
@@ -6554,6 +6570,10 @@ def start_multiplex(params = {}, options = {})
# resp.encoder_settings.video_descriptions[0].sharpness #=> Integer
# resp.encoder_settings.video_descriptions[0].width #=> Integer
# resp.encoder_settings.thumbnail_configuration.state #=> String, one of "AUTO", "DISABLED"
+ # resp.encoder_settings.color_correction_settings.global_color_corrections #=> Array
+ # resp.encoder_settings.color_correction_settings.global_color_corrections[0].input_color_space #=> String, one of "HDR10", "HLG_2020", "REC_601", "REC_709"
+ # resp.encoder_settings.color_correction_settings.global_color_corrections[0].output_color_space #=> String, one of "HDR10", "HLG_2020", "REC_601", "REC_709"
+ # resp.encoder_settings.color_correction_settings.global_color_corrections[0].uri #=> String
# resp.id #=> String
# resp.input_attachments #=> Array
# resp.input_attachments[0].automatic_input_failover_settings.error_clear_time_msec #=> Integer
@@ -7386,6 +7406,10 @@ def update_account_configuration(params = {}, options = {})
# resp.channel.encoder_settings.video_descriptions[0].sharpness #=> Integer
# resp.channel.encoder_settings.video_descriptions[0].width #=> Integer
# resp.channel.encoder_settings.thumbnail_configuration.state #=> String, one of "AUTO", "DISABLED"
+ # resp.channel.encoder_settings.color_correction_settings.global_color_corrections #=> Array
+ # resp.channel.encoder_settings.color_correction_settings.global_color_corrections[0].input_color_space #=> String, one of "HDR10", "HLG_2020", "REC_601", "REC_709"
+ # resp.channel.encoder_settings.color_correction_settings.global_color_corrections[0].output_color_space #=> String, one of "HDR10", "HLG_2020", "REC_601", "REC_709"
+ # resp.channel.encoder_settings.color_correction_settings.global_color_corrections[0].uri #=> String
# resp.channel.id #=> String
# resp.channel.input_attachments #=> Array
# resp.channel.input_attachments[0].automatic_input_failover_settings.error_clear_time_msec #=> Integer
@@ -8098,6 +8122,10 @@ def update_channel(params = {}, options = {})
# resp.channel.encoder_settings.video_descriptions[0].sharpness #=> Integer
# resp.channel.encoder_settings.video_descriptions[0].width #=> Integer
# resp.channel.encoder_settings.thumbnail_configuration.state #=> String, one of "AUTO", "DISABLED"
+ # resp.channel.encoder_settings.color_correction_settings.global_color_corrections #=> Array
+ # resp.channel.encoder_settings.color_correction_settings.global_color_corrections[0].input_color_space #=> String, one of "HDR10", "HLG_2020", "REC_601", "REC_709"
+ # resp.channel.encoder_settings.color_correction_settings.global_color_corrections[0].output_color_space #=> String, one of "HDR10", "HLG_2020", "REC_601", "REC_709"
+ # resp.channel.encoder_settings.color_correction_settings.global_color_corrections[0].uri #=> String
# resp.channel.id #=> String
# resp.channel.input_attachments #=> Array
# resp.channel.input_attachments[0].automatic_input_failover_settings.error_clear_time_msec #=> Integer
@@ -8669,7 +8697,7 @@ def build_request(operation_name, params = {})
params: params,
config: config)
context[:gem_name] = 'aws-sdk-medialive'
- context[:gem_version] = '1.112.0'
+ context[:gem_version] = '1.113.0'
Seahorse::Client::Request.new(handlers, context)
end
diff --git a/gems/aws-sdk-medialive/lib/aws-sdk-medialive/client_api.rb b/gems/aws-sdk-medialive/lib/aws-sdk-medialive/client_api.rb
index 56bcd7a1fc6..b36a6a89586 100644
--- a/gems/aws-sdk-medialive/lib/aws-sdk-medialive/client_api.rb
+++ b/gems/aws-sdk-medialive/lib/aws-sdk-medialive/client_api.rb
@@ -124,6 +124,9 @@ module ClientApi
ChannelSummary = Shapes::StructureShape.new(name: 'ChannelSummary')
ClaimDeviceRequest = Shapes::StructureShape.new(name: 'ClaimDeviceRequest')
ClaimDeviceResponse = Shapes::StructureShape.new(name: 'ClaimDeviceResponse')
+ ColorCorrection = Shapes::StructureShape.new(name: 'ColorCorrection')
+ ColorCorrectionSettings = Shapes::StructureShape.new(name: 'ColorCorrectionSettings')
+ ColorSpace = Shapes::StringShape.new(name: 'ColorSpace')
ColorSpacePassthroughSettings = Shapes::StructureShape.new(name: 'ColorSpacePassthroughSettings')
ConflictException = Shapes::StructureShape.new(name: 'ConflictException')
ContentType = Shapes::StringShape.new(name: 'ContentType')
@@ -814,6 +817,7 @@ module ClientApi
__listOfCaptionSelector = Shapes::ListShape.new(name: '__listOfCaptionSelector')
__listOfChannelEgressEndpoint = Shapes::ListShape.new(name: '__listOfChannelEgressEndpoint')
__listOfChannelSummary = Shapes::ListShape.new(name: '__listOfChannelSummary')
+ __listOfColorCorrection = Shapes::ListShape.new(name: '__listOfColorCorrection')
__listOfFailoverCondition = Shapes::ListShape.new(name: '__listOfFailoverCondition')
__listOfHlsAdMarkers = Shapes::ListShape.new(name: '__listOfHlsAdMarkers')
__listOfInput = Shapes::ListShape.new(name: '__listOfInput')
@@ -1258,6 +1262,14 @@ module ClientApi
ClaimDeviceResponse.struct_class = Types::ClaimDeviceResponse
+ ColorCorrection.add_member(:input_color_space, Shapes::ShapeRef.new(shape: ColorSpace, required: true, location_name: "inputColorSpace"))
+ ColorCorrection.add_member(:output_color_space, Shapes::ShapeRef.new(shape: ColorSpace, required: true, location_name: "outputColorSpace"))
+ ColorCorrection.add_member(:uri, Shapes::ShapeRef.new(shape: __string, required: true, location_name: "uri"))
+ ColorCorrection.struct_class = Types::ColorCorrection
+
+ ColorCorrectionSettings.add_member(:global_color_corrections, Shapes::ShapeRef.new(shape: __listOfColorCorrection, required: true, location_name: "globalColorCorrections"))
+ ColorCorrectionSettings.struct_class = Types::ColorCorrectionSettings
+
ColorSpacePassthroughSettings.struct_class = Types::ColorSpacePassthroughSettings
ConflictException.add_member(:message, Shapes::ShapeRef.new(shape: __string, location_name: "message"))
@@ -1775,6 +1787,7 @@ module ClientApi
EncoderSettings.add_member(:timecode_config, Shapes::ShapeRef.new(shape: TimecodeConfig, required: true, location_name: "timecodeConfig"))
EncoderSettings.add_member(:video_descriptions, Shapes::ShapeRef.new(shape: __listOfVideoDescription, required: true, location_name: "videoDescriptions"))
EncoderSettings.add_member(:thumbnail_configuration, Shapes::ShapeRef.new(shape: ThumbnailConfiguration, location_name: "thumbnailConfiguration"))
+ EncoderSettings.add_member(:color_correction_settings, Shapes::ShapeRef.new(shape: ColorCorrectionSettings, location_name: "colorCorrectionSettings"))
EncoderSettings.struct_class = Types::EncoderSettings
EpochLockingSettings.add_member(:custom_epoch, Shapes::ShapeRef.new(shape: __string, location_name: "customEpoch"))
@@ -3481,6 +3494,8 @@ module ClientApi
__listOfChannelSummary.member = Shapes::ShapeRef.new(shape: ChannelSummary)
+ __listOfColorCorrection.member = Shapes::ShapeRef.new(shape: ColorCorrection)
+
__listOfFailoverCondition.member = Shapes::ShapeRef.new(shape: FailoverCondition)
__listOfHlsAdMarkers.member = Shapes::ShapeRef.new(shape: HlsAdMarkers)
diff --git a/gems/aws-sdk-medialive/lib/aws-sdk-medialive/types.rb b/gems/aws-sdk-medialive/lib/aws-sdk-medialive/types.rb
index 3233e639863..56c32880a40 100644
--- a/gems/aws-sdk-medialive/lib/aws-sdk-medialive/types.rb
+++ b/gems/aws-sdk-medialive/lib/aws-sdk-medialive/types.rb
@@ -1981,6 +1981,55 @@ class ClaimDeviceRequest < Struct.new(
#
class ClaimDeviceResponse < Aws::EmptyStructure; end
+ # Property of ColorCorrectionSettings. Used for custom color space
+ # conversion. The object identifies one 3D LUT file and specifies the
+ # input/output color space combination that the file will be used for.
+ #
+ # @!attribute [rw] input_color_space
+ # The color space of the input.
+ # @return [String]
+ #
+ # @!attribute [rw] output_color_space
+ # The color space of the output.
+ # @return [String]
+ #
+ # @!attribute [rw] uri
+ # The URI of the 3D LUT file. The protocol must be 's3:' or
+ # 's3ssl:'.
+ # @return [String]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ColorCorrection AWS API Documentation
+ #
+ class ColorCorrection < Struct.new(
+ :input_color_space,
+ :output_color_space,
+ :uri)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Property of encoderSettings. Controls color conversion when you are
+ # using 3D LUT files to perform color conversion on video.
+ #
+ # @!attribute [rw] global_color_corrections
+ # An array of colorCorrections that applies when you are using 3D LUT
+ # files to perform color conversion on video. Each colorCorrection
+ # contains one 3D LUT file (that defines the color mapping for
+ # converting an input color space to an output color space), and the
+ # input/output combination that this 3D LUT file applies to. MediaLive
+ # reads the color space in the input metadata, determines the color
+ # space that you have specified for the output, and finds and uses the
+ # LUT file that applies to this combination.
+ # @return [Array]
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ColorCorrectionSettings AWS API Documentation
+ #
+ class ColorCorrectionSettings < Struct.new(
+ :global_color_corrections)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
# Passthrough applies no color space conversion to the output
#
# @api private
@@ -4308,6 +4357,10 @@ class Empty < Aws::EmptyStructure; end
# Thumbnail configuration settings.
# @return [Types::ThumbnailConfiguration]
#
+ # @!attribute [rw] color_correction_settings
+ # Color correction settings
+ # @return [Types::ColorCorrectionSettings]
+ #
# @see http://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/EncoderSettings AWS API Documentation
#
class EncoderSettings < Struct.new(
@@ -4323,7 +4376,8 @@ class EncoderSettings < Struct.new(
:output_groups,
:timecode_config,
:video_descriptions,
- :thumbnail_configuration)
+ :thumbnail_configuration,
+ :color_correction_settings)
SENSITIVE = []
include Aws::Structure
end