From 5afe923d1106eef4e16613501da15e8f998fc469 Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Tue, 29 Oct 2024 18:12:37 +0000 Subject: [PATCH 1/4] Merge customizations for Redshift Data --- .../redshift-data/2019-12-20/paginators-1.sdk-extras.json | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/botocore/data/redshift-data/2019-12-20/paginators-1.sdk-extras.json b/botocore/data/redshift-data/2019-12-20/paginators-1.sdk-extras.json index d9b46b92d6..8a0601670e 100644 --- a/botocore/data/redshift-data/2019-12-20/paginators-1.sdk-extras.json +++ b/botocore/data/redshift-data/2019-12-20/paginators-1.sdk-extras.json @@ -8,6 +8,13 @@ "TotalNumRows" ] }, + "GetStatementResultV2": { + "non_aggregate_keys": [ + "ColumnMetadata", + "TotalNumRows", + "ResultFormat" + ] + }, "DescribeTable": { "non_aggregate_keys": [ "TableName" From 386ce4cec0274d2bf0497cc98913ec745f88183d Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Tue, 29 Oct 2024 18:12:44 +0000 Subject: [PATCH 2/4] Update to latest models --- .../api-change-bedrock-56802.json | 5 + .../api-change-bedrockruntime-5342.json | 5 + .../api-change-cleanrooms-55004.json | 5 + .../api-change-iotfleetwise-54553.json | 5 + .../next-release/api-change-logs-88718.json | 5 + .../api-change-redshiftdata-42855.json | 5 + .../api-change-sagemaker-80215.json | 5 + .../bedrock-runtime/2023-09-30/service-2.json | 4 +- .../data/bedrock/2023-04-20/service-2.json | 184 ++++++++++++--- .../data/cleanrooms/2022-02-17/service-2.json | 214 +++++++++++++----- .../iotfleetwise/2021-06-17/service-2.json | 4 + botocore/data/logs/2014-03-28/service-2.json | 5 + .../2019-12-20/paginators-1.json | 5 + .../redshift-data/2019-12-20/service-2.json | 103 ++++++++- .../data/sagemaker/2017-07-24/service-2.json | 49 ++-- 15 files changed, 488 insertions(+), 115 deletions(-) create mode 100644 .changes/next-release/api-change-bedrock-56802.json create mode 100644 .changes/next-release/api-change-bedrockruntime-5342.json create mode 100644 .changes/next-release/api-change-cleanrooms-55004.json create mode 100644 .changes/next-release/api-change-iotfleetwise-54553.json create mode 100644 .changes/next-release/api-change-logs-88718.json create mode 100644 .changes/next-release/api-change-redshiftdata-42855.json create mode 100644 .changes/next-release/api-change-sagemaker-80215.json diff --git a/.changes/next-release/api-change-bedrock-56802.json b/.changes/next-release/api-change-bedrock-56802.json new file mode 100644 index 0000000000..63fd970317 --- /dev/null +++ b/.changes/next-release/api-change-bedrock-56802.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``bedrock``", + "description": "Update Application Inference Profile" +} diff --git a/.changes/next-release/api-change-bedrockruntime-5342.json b/.changes/next-release/api-change-bedrockruntime-5342.json new file mode 100644 index 0000000000..c2283d421f --- /dev/null +++ b/.changes/next-release/api-change-bedrockruntime-5342.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``bedrock-runtime``", + "description": "Update Application Inference Profile" +} diff --git a/.changes/next-release/api-change-cleanrooms-55004.json b/.changes/next-release/api-change-cleanrooms-55004.json new file mode 100644 index 0000000000..fa2f734a81 --- /dev/null +++ b/.changes/next-release/api-change-cleanrooms-55004.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``cleanrooms``", + "description": "This release adds the option for customers to configure analytics engine 
when creating a collaboration, and introduces the new SPARK analytics engine type in addition to maintaining the legacy CLEAN_ROOMS_SQL engine type." +} diff --git a/.changes/next-release/api-change-iotfleetwise-54553.json b/.changes/next-release/api-change-iotfleetwise-54553.json new file mode 100644 index 0000000000..e800629749 --- /dev/null +++ b/.changes/next-release/api-change-iotfleetwise-54553.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``iotfleetwise``", + "description": "Updated BatchCreateVehicle and BatchUpdateVehicle APIs: LimitExceededException has been added and the maximum number of vehicles in a batch has been set to 10 explicitly" +} diff --git a/.changes/next-release/api-change-logs-88718.json b/.changes/next-release/api-change-logs-88718.json new file mode 100644 index 0000000000..e9a4fbf78e --- /dev/null +++ b/.changes/next-release/api-change-logs-88718.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``logs``", + "description": "Added support for new optional baseline parameter in the UpdateAnomaly API. For UpdateAnomaly requests with baseline set to True, The anomaly behavior is then treated as baseline behavior. However, more severe occurrences of this behavior will still be reported as anomalies." +} diff --git a/.changes/next-release/api-change-redshiftdata-42855.json b/.changes/next-release/api-change-redshiftdata-42855.json new file mode 100644 index 0000000000..8bc1bdf230 --- /dev/null +++ b/.changes/next-release/api-change-redshiftdata-42855.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``redshift-data``", + "description": "Adding a new API GetStatementResultV2 that supports CSV formatted results from ExecuteStatement and BatchExecuteStatement calls." +} diff --git a/.changes/next-release/api-change-sagemaker-80215.json b/.changes/next-release/api-change-sagemaker-80215.json new file mode 100644 index 0000000000..2eb310f368 --- /dev/null +++ b/.changes/next-release/api-change-sagemaker-80215.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``sagemaker``", + "description": "Adding `notebook-al2-v3` as allowed value to SageMaker NotebookInstance PlatformIdentifier attribute" +} diff --git a/botocore/data/bedrock-runtime/2023-09-30/service-2.json b/botocore/data/bedrock-runtime/2023-09-30/service-2.json index 70519f0141..5a6e8cce24 100644 --- a/botocore/data/bedrock-runtime/2023-09-30/service-2.json +++ b/botocore/data/bedrock-runtime/2023-09-30/service-2.json @@ -339,7 +339,7 @@ "type":"string", "max":2048, "min":1, - "pattern":"(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:imported-model/[a-z0-9]{12})|([0-9]{12}:provisioned-model/[a-z0-9]{12})|([0-9]{12}:inference-profile/[a-zA-Z0-9-:.]+)))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)|([a-zA-Z0-9-:.]+)" + "pattern":"(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:imported-model/[a-z0-9]{12})|([0-9]{12}:provisioned-model/[a-z0-9]{12})|([0-9]{12}:(inference-profile|application-inference-profile)/[a-zA-Z0-9-:.]+)))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)|([a-zA-Z0-9-:.]+)" }, "ConverseMetrics":{ "type":"structure", @@ -1515,7 +1515,7 @@ "type":"string", 
"max":2048, "min":1, - "pattern":"(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:imported-model/[a-z0-9]{12})|([0-9]{12}:provisioned-model/[a-z0-9]{12})|([0-9]{12}:inference-profile/[a-zA-Z0-9-:.]+)))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)|([a-zA-Z0-9-:.]+)" + "pattern":"(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:imported-model/[a-z0-9]{12})|([0-9]{12}:provisioned-model/[a-z0-9]{12})|([0-9]{12}:(inference-profile|application-inference-profile)/[a-zA-Z0-9-:.]+)))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)|([a-zA-Z0-9-:.]+)" }, "InvokeModelRequest":{ "type":"structure", diff --git a/botocore/data/bedrock/2023-04-20/service-2.json b/botocore/data/bedrock/2023-04-20/service-2.json index b31712f6f9..0d2dadc280 100644 --- a/botocore/data/bedrock/2023-04-20/service-2.json +++ b/botocore/data/bedrock/2023-04-20/service-2.json @@ -94,6 +94,28 @@ ], "documentation":"

Creates a version of the guardrail. Use this API to create a snapshot of the guardrail when you are satisfied with a configuration, or to compare the configuration with another version.

" }, + "CreateInferenceProfile":{ + "name":"CreateInferenceProfile", + "http":{ + "method":"POST", + "requestUri":"/inference-profiles", + "responseCode":201 + }, + "input":{"shape":"CreateInferenceProfileRequest"}, + "output":{"shape":"CreateInferenceProfileResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"TooManyTagsException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Creates an application inference profile to track metrics and costs when invoking a model. To create an application inference profile for a foundation model in one region, specify the ARN of the model in that region. To create an application inference profile for a foundation model across multiple regions, specify the ARN of the system-defined inference profile that contains the regions that you want to route requests to. For more information, see Increase throughput and resilience with cross-region inference in Amazon Bedrock in the Amazon Bedrock User Guide.
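For context, a minimal boto3 sketch of this new operation (the client method assumes the standard snake_case mapping once a botocore release includes this model; the profile name, description, and model ARN are placeholder assumptions):

import boto3

# Create an application inference profile that tracks metrics and costs for a
# single foundation model in one Region (placeholder ARN).
bedrock = boto3.client("bedrock", region_name="us-east-1")
response = bedrock.create_inference_profile(
    inferenceProfileName="my-app-profile",
    description="Cost and metrics tracking for my application",
    modelSource={
        # ARN of a foundation model (single-Region profile), or of a
        # system-defined inference profile for cross-Region routing.
        "copyFrom": "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-3-sonnet-20240229-v1:0"
    },
)
print(response["inferenceProfileArn"], response["status"])

The returned inferenceProfileArn can then be passed as the modelId in bedrock-runtime Converse and InvokeModel requests, which is what the widened application-inference-profile ARN patterns elsewhere in this patch permit.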

", + "idempotent":true + }, "CreateModelCopyJob":{ "name":"CreateModelCopyJob", "http":{ @@ -258,6 +280,26 @@ "documentation":"

Deletes a custom model that you imported earlier. For more information, see Import a customized model in the Amazon Bedrock User Guide.

", "idempotent":true }, + "DeleteInferenceProfile":{ + "name":"DeleteInferenceProfile", + "http":{ + "method":"DELETE", + "requestUri":"/inference-profiles/{inferenceProfileIdentifier}", + "responseCode":200 + }, + "input":{"shape":"DeleteInferenceProfileRequest"}, + "output":{"shape":"DeleteInferenceProfileResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Deletes an application inference profile. For more information, see Increase throughput and resilience with cross-region inference in Amazon Bedrock in the Amazon Bedrock User Guide.

", + "idempotent":true + }, "DeleteModelInvocationLoggingConfiguration":{ "name":"DeleteModelInvocationLoggingConfiguration", "http":{ @@ -401,7 +443,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Gets information about an inference profile. For more information, see the Amazon Bedrock User Guide.

" + "documentation":"

Gets information about an inference profile. For more information, see Increase throughput and resilience with cross-region inference in Amazon Bedrock in the Amazon Bedrock User Guide.

" }, "GetModelCopyJob":{ "name":"GetModelCopyJob", @@ -610,7 +652,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Returns a list of inference profiles that you can use.

" + "documentation":"

Returns a list of inference profiles that you can use. For more information, see Increase throughput and resilience with cross-region inference in Amazon Bedrock in the Amazon Bedrock User Guide.

" }, "ListModelCopyJobs":{ "name":"ListModelCopyJobs", @@ -714,7 +756,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

List the tags associated with the specified resource.

For more information, see Tagging resources in the Amazon Bedrock User Guide.

" + "documentation":"

List the tags associated with the specified resource.

For more information, see Tagging resources in the Amazon Bedrock User Guide.

" }, "PutModelInvocationLoggingConfiguration":{ "name":"PutModelInvocationLoggingConfiguration", @@ -809,7 +851,7 @@ {"shape":"TooManyTagsException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Associate tags with a resource. For more information, see Tagging resources in the Amazon Bedrock User Guide.

" + "documentation":"

Associate tags with a resource. For more information, see Tagging resources in the Amazon Bedrock User Guide.

" }, "UntagResource":{ "name":"UntagResource", @@ -827,7 +869,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Remove one or more tags from a resource. For more information, see Tagging resources in the Amazon Bedrock User Guide.

" + "documentation":"

Remove one or more tags from a resource. For more information, see Tagging resources in the Amazon Bedrock User Guide.

" }, "UpdateGuardrail":{ "name":"UpdateGuardrail", @@ -1226,6 +1268,50 @@ } } }, + "CreateInferenceProfileRequest":{ + "type":"structure", + "required":[ + "inferenceProfileName", + "modelSource" + ], + "members":{ + "inferenceProfileName":{ + "shape":"InferenceProfileName", + "documentation":"

A name for the inference profile.

" + }, + "description":{ + "shape":"InferenceProfileDescription", + "documentation":"

A description for the inference profile.

" + }, + "clientRequestToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency.

", + "idempotencyToken":true + }, + "modelSource":{ + "shape":"InferenceProfileModelSource", + "documentation":"

The foundation model or system-defined inference profile that the inference profile will track metrics and costs for.

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

An array of objects, each of which contains a tag and its value. For more information, see Tagging resources in the Amazon Bedrock User Guide.

" + } + } + }, + "CreateInferenceProfileResponse":{ + "type":"structure", + "required":["inferenceProfileArn"], + "members":{ + "inferenceProfileArn":{ + "shape":"InferenceProfileArn", + "documentation":"

The ARN of the inference profile that you created.

" + }, + "status":{ + "shape":"InferenceProfileStatus", + "documentation":"

The status of the inference profile. ACTIVE means that the inference profile is ready to be used.

" + } + } + }, "CreateModelCopyJobRequest":{ "type":"structure", "required":[ @@ -1629,6 +1715,23 @@ "members":{ } }, + "DeleteInferenceProfileRequest":{ + "type":"structure", + "required":["inferenceProfileIdentifier"], + "members":{ + "inferenceProfileIdentifier":{ + "shape":"InferenceProfileIdentifier", + "documentation":"

The Amazon Resource Name (ARN) or ID of the application inference profile to delete.

", + "location":"uri", + "locationName":"inferenceProfileIdentifier" + } + } + }, + "DeleteInferenceProfileResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteModelInvocationLoggingConfigurationRequest":{ "type":"structure", "members":{ @@ -1864,7 +1967,7 @@ "type":"string", "max":2048, "min":1, - "pattern":"(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:((:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})|([0-9]{12}:imported-model/[a-z0-9]{12})|([0-9]{12}:inference-profile/(([a-z]{2}.)[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63})))))|(([a-z]{2}[.]{1})([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))" + "pattern":"(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:((:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})|([0-9]{12}:imported-model/[a-z0-9]{12})|([0-9]{12}:application-inference-profile/[a-z0-9]{12})|([0-9]{12}:inference-profile/(([a-z]{2}.)[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63})))))|(([a-z]{2}[.]{1})([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))" }, "EvaluationModelIdentifiers":{ "type":"list", @@ -2447,7 +2550,7 @@ "members":{ "inferenceProfileIdentifier":{ "shape":"InferenceProfileIdentifier", - "documentation":"

The unique identifier of the inference profile.

", + "documentation":"

The ID or Amazon Resource Name (ARN) of the inference profile.

", "location":"uri", "locationName":"inferenceProfileIdentifier" } @@ -2457,8 +2560,8 @@ "type":"structure", "required":[ "inferenceProfileName", - "models", "inferenceProfileArn", + "models", "inferenceProfileId", "status", "type" @@ -2468,10 +2571,6 @@ "shape":"InferenceProfileName", "documentation":"

The name of the inference profile.

" }, - "models":{ - "shape":"InferenceProfileModels", - "documentation":"

A list of information about each model in the inference profile.

" - }, "description":{ "shape":"InferenceProfileDescription", "documentation":"

The description of the inference profile.

" @@ -2488,17 +2587,21 @@ "shape":"InferenceProfileArn", "documentation":"

The Amazon Resource Name (ARN) of the inference profile.

" }, + "models":{ + "shape":"InferenceProfileModels", + "documentation":"

A list of information about each model in the inference profile.

" + }, "inferenceProfileId":{ "shape":"InferenceProfileId", "documentation":"

The unique identifier of the inference profile.

" }, "status":{ "shape":"InferenceProfileStatus", - "documentation":"

The status of the inference profile. ACTIVE means that the inference profile is available to use.

" + "documentation":"

The status of the inference profile. ACTIVE means that the inference profile is ready to be used.

" }, "type":{ "shape":"InferenceProfileType", - "documentation":"

The type of the inference profile. SYSTEM_DEFINED means that the inference profile is defined by Amazon Bedrock.

" + "documentation":"

The type of the inference profile. The following types are possible: SYSTEM_DEFINED (the inference profile is defined by Amazon Bedrock) and APPLICATION (the inference profile is created by a user).

" } } }, @@ -3821,13 +3924,14 @@ "type":"string", "max":2048, "min":1, - "pattern":"arn:aws(|-us-gov|-cn|-iso|-iso-b):bedrock:(|[0-9a-z-]{0,20}):(|[0-9]{12}):inference-profile/[a-zA-Z0-9-:.]+" + "pattern":"arn:aws(|-us-gov|-cn|-iso|-iso-b):bedrock:(|[0-9a-z-]{0,20}):(|[0-9]{12}):(inference-profile|application-inference-profile)/[a-zA-Z0-9-:.]+" }, "InferenceProfileDescription":{ "type":"string", - "max":500, + "max":200, "min":1, - "pattern":".+" + "pattern":"([0-9a-zA-Z:.][ _-]?)+", + "sensitive":true }, "InferenceProfileId":{ "type":"string", @@ -3839,7 +3943,7 @@ "type":"string", "max":2048, "min":1, - "pattern":"(arn:aws(|-us-gov|-cn|-iso|-iso-b):bedrock:(|[0-9a-z-]{0,20}):(|[0-9]{12}):inference-profile/)?[a-zA-Z0-9-:.]+" + "pattern":"(arn:aws(|-us-gov|-cn|-iso|-iso-b):bedrock:(|[0-9a-z-]{0,20}):(|[0-9]{12}):(inference-profile|application-inference-profile)/)?[a-zA-Z0-9-:.]+" }, "InferenceProfileModel":{ "type":"structure", @@ -3851,6 +3955,23 @@ }, "documentation":"

Contains information about a model.

" }, + "InferenceProfileModelSource":{ + "type":"structure", + "members":{ + "copyFrom":{ + "shape":"InferenceProfileModelSourceArn", + "documentation":"

The ARN of the model or system-defined inference profile that is the source for the inference profile.

" + } + }, + "documentation":"

Contains information about the model or system-defined inference profile that is the source for an inference profile.

", + "union":true + }, + "InferenceProfileModelSourceArn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"arn:aws(|-us-gov|-cn|-iso|-iso-b):bedrock:(|[0-9a-z-]{0,20}):(|[0-9]{12}):(inference-profile|foundation-model)/[a-zA-Z0-9-:.]+" + }, "InferenceProfileModels":{ "type":"list", "member":{"shape":"InferenceProfileModel"}, @@ -3875,8 +3996,8 @@ "type":"structure", "required":[ "inferenceProfileName", - "models", "inferenceProfileArn", + "models", "inferenceProfileId", "status", "type" @@ -3886,10 +4007,6 @@ "shape":"InferenceProfileName", "documentation":"

The name of the inference profile.

" }, - "models":{ - "shape":"InferenceProfileModels", - "documentation":"

A list of information about each model in the inference profile.

" - }, "description":{ "shape":"InferenceProfileDescription", "documentation":"

The description of the inference profile.

" @@ -3906,24 +4023,31 @@ "shape":"InferenceProfileArn", "documentation":"

The Amazon Resource Name (ARN) of the inference profile.

" }, + "models":{ + "shape":"InferenceProfileModels", + "documentation":"

A list of information about each model in the inference profile.

" + }, "inferenceProfileId":{ "shape":"InferenceProfileId", "documentation":"

The unique identifier of the inference profile.

" }, "status":{ "shape":"InferenceProfileStatus", - "documentation":"

The status of the inference profile. ACTIVE means that the inference profile is available to use.

" + "documentation":"

The status of the inference profile. ACTIVE means that the inference profile is ready to be used.

" }, "type":{ "shape":"InferenceProfileType", - "documentation":"

The type of the inference profile. SYSTEM_DEFINED means that the inference profile is defined by Amazon Bedrock.

" + "documentation":"

The type of the inference profile. The following types are possible: SYSTEM_DEFINED (the inference profile is defined by Amazon Bedrock) and APPLICATION (the inference profile is created by a user).

" } }, "documentation":"

Contains information about an inference profile.

" }, "InferenceProfileType":{ "type":"string", - "enum":["SYSTEM_DEFINED"] + "enum":[ + "SYSTEM_DEFINED", + "APPLICATION" + ] }, "InferenceType":{ "type":"string", @@ -4266,6 +4390,12 @@ "documentation":"

If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results.

", "location":"querystring", "locationName":"nextToken" + }, + "typeEquals":{ + "shape":"InferenceProfileType", + "documentation":"

Filters for inference profiles that match the type you specify.
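As a small hedged illustration of the new filter (method and key names assume the usual boto3 mapping for the shapes in this file):

import boto3

bedrock = boto3.client("bedrock")
# typeEquals maps to the "type" query-string parameter defined here.
response = bedrock.list_inference_profiles(typeEquals="APPLICATION")
for profile in response["inferenceProfileSummaries"]:
    print(profile["inferenceProfileName"], profile["type"])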

", + "location":"querystring", + "locationName":"type" } } }, @@ -5541,7 +5671,7 @@ "type":"string", "max":1011, "min":20, - "pattern":".*(^[a-zA-Z0-9][a-zA-Z0-9\\-]*$)|(^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:([0-9]{12}|)((:(fine-tuning-job|model-customization-job|custom-model)/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([a-z0-9-]{1,63}[.]){0,2}[a-z0-9-]{1,63}([:][a-z0-9-]{1,63}){0,2}(/[a-z0-9]{12})$)|(:guardrail/[a-z0-9]+$)|(:(provisioned-model|model-invocation-job|model-evaluation-job|evaluation-job|model-import-job|imported-model)/[a-z0-9]{12}$))).*" + "pattern":".*(^[a-zA-Z0-9][a-zA-Z0-9\\-]*$)|(^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:([0-9]{12}|)((:(fine-tuning-job|model-customization-job|custom-model)/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([a-z0-9-]{1,63}[.]){0,2}[a-z0-9-]{1,63}([:][a-z0-9-]{1,63}){0,2}(/[a-z0-9]{12})$)|(:guardrail/[a-z0-9]+$)|(:(inference-profile|application-inference-profile)/[a-zA-Z0-9-:.]+$)|(:(provisioned-model|model-invocation-job|model-evaluation-job|evaluation-job|model-import-job|imported-model)/[a-z0-9]{12}$))).*" }, "ThrottlingException":{ "type":"structure", diff --git a/botocore/data/cleanrooms/2022-02-17/service-2.json b/botocore/data/cleanrooms/2022-02-17/service-2.json index b427d20b03..d90a3bac7b 100644 --- a/botocore/data/cleanrooms/2022-02-17/service-2.json +++ b/botocore/data/cleanrooms/2022-02-17/service-2.json @@ -1188,6 +1188,7 @@ "output":{"shape":"PopulateIdMappingTableOutput"}, "errors":[ {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, {"shape":"ValidationException"}, @@ -2011,7 +2012,7 @@ "type":"string", "max":200, "min":0, - "pattern":"(ANY_QUERY|arn:aws:cleanrooms:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:membership/[\\d\\w-]+/analysistemplate/[\\d\\w-]+)" + "pattern":"(ANY_QUERY|ANY_JOB|arn:aws:cleanrooms:[\\w]{2}-[\\w]{4,9}-[\\d]:[\\d]{12}:membership/[\\d\\w-]+/analysistemplate/[\\d\\w-]+)" }, "AnalysisTemplateIdentifier":{ "type":"string", @@ -2145,6 +2146,13 @@ "ADDITIONAL_ANALYSIS" ] }, + "AnalyticsEngine":{ + "type":"string", + "enum":[ + "SPARK", + "CLEAN_ROOMS_SQL" + ] + }, "BatchGetCollaborationAnalysisTemplateError":{ "type":"structure", "required":[ @@ -2345,6 +2353,17 @@ } } }, + "BilledResourceUtilization":{ + "type":"structure", + "required":["units"], + "members":{ + "units":{ + "shape":"Double", + "documentation":"

The number of Clean Rooms Processing Unit (CRPU) hours that have been billed.

" + } + }, + "documentation":"

Information related to the utilization of resources that have been billed or charged for in a given context, such as a protected query.

" + }, "Boolean":{ "type":"boolean", "box":true @@ -2420,6 +2439,10 @@ "queryLogStatus":{ "shape":"CollaborationQueryLogStatus", "documentation":"

An indicator as to whether query logging has been enabled or disabled for the collaboration.

" + }, + "analyticsEngine":{ + "shape":"AnalyticsEngine", + "documentation":"

The analytics engine for the collaboration.

" } }, "documentation":"

The multi-party data share environment. The collaboration contains metadata about its purpose and participants.

" @@ -2611,7 +2634,7 @@ }, "creatorAccountId":{ "shape":"AccountId", - "documentation":"

The identifier used to reference members of the collaboration. Only supports AWS account ID.

" + "documentation":"

The identifier used to reference members of the collaboration. Only supports Amazon Web Services account ID.

" }, "createTime":{ "shape":"Timestamp", @@ -2667,7 +2690,7 @@ }, "creatorAccountId":{ "shape":"AccountId", - "documentation":"

The identifier used to reference members of the collaboration. Only supports AWS account ID.

" + "documentation":"

The identifier used to reference members of the collaboration. Only supports Amazon Web Services account ID.

" }, "description":{ "shape":"ResourceDescription", @@ -3058,6 +3081,10 @@ "membershipArn":{ "shape":"MembershipArn", "documentation":"

The ARN of a member in a collaboration.

" + }, + "analyticsEngine":{ + "shape":"AnalyticsEngine", + "documentation":"

The analytics engine.

" } }, "documentation":"

The metadata of the collaboration.

" @@ -3100,6 +3127,17 @@ "min":0, "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDBFF-\\uDC00\\uDFFF\\t]*" }, + "ComputeConfiguration":{ + "type":"structure", + "members":{ + "worker":{ + "shape":"WorkerComputeConfiguration", + "documentation":"

The worker configuration for the compute environment.

" + } + }, + "documentation":"

The configuration of the compute resources for an analysis with the Spark analytics engine.

", + "union":true + }, "ConfigurationDetails":{ "type":"structure", "members":{ @@ -3850,6 +3888,10 @@ "creatorPaymentConfiguration":{ "shape":"PaymentConfiguration", "documentation":"

The collaboration creator's payment responsibilities set by the collaboration creator.

If the collaboration creator hasn't specified anyone as the member paying for query compute costs, then the member who can query is the default payer.

" + }, + "analyticsEngine":{ + "shape":"AnalyticsEngine", + "documentation":"

The analytics engine.
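A hedged sketch of choosing the engine at creation time, based on the shapes in this file (all names and the partner account ID are placeholders):

import boto3

cleanrooms = boto3.client("cleanrooms")
# Create a collaboration that runs analyses on the new SPARK engine.
collaboration = cleanrooms.create_collaboration(
    name="example-collaboration",
    description="Collaboration that runs analyses on the Spark engine",
    creatorDisplayName="Data provider",
    creatorMemberAbilities=["CAN_QUERY", "CAN_RECEIVE_RESULTS"],
    members=[
        {
            "accountId": "111122223333",   # placeholder partner account
            "memberAbilities": [],
            "displayName": "Partner",
        }
    ],
    queryLogStatus="ENABLED",
    analyticsEngine="SPARK",  # or "CLEAN_ROOMS_SQL", the legacy engine
)
print(collaboration["collaboration"]["id"])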

" } } }, @@ -5944,13 +5986,13 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

", + "documentation":"

The pagination token that's used to fetch the next set of results.

", "location":"querystring", "locationName":"nextToken" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum size of the results that is returned per call.

", + "documentation":"

The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met.

", "location":"querystring", "locationName":"maxResults" } @@ -5962,7 +6004,7 @@ "members":{ "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

" + "documentation":"

The pagination token that's used to fetch the next set of results.

" }, "analysisTemplateSummaries":{ "shape":"AnalysisTemplateSummaryList", @@ -5982,13 +6024,13 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

", + "documentation":"

The pagination token that's used to fetch the next set of results.

", "location":"querystring", "locationName":"nextToken" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum size of the results that is returned per call.

", + "documentation":"

The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met.

", "location":"querystring", "locationName":"maxResults" } @@ -6000,7 +6042,7 @@ "members":{ "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

" + "documentation":"

The pagination token that's used to fetch the next set of results.

" }, "collaborationAnalysisTemplateSummaries":{ "shape":"CollaborationAnalysisTemplateSummaryList", @@ -6020,13 +6062,13 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

", + "documentation":"

The pagination token that's used to fetch the next set of results.

", "location":"querystring", "locationName":"nextToken" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum size of the results that is returned per call.

", + "documentation":"

The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met.

", "location":"querystring", "locationName":"maxResults" } @@ -6042,7 +6084,7 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

" + "documentation":"

The pagination token that's used to fetch the next set of results.

" } } }, @@ -6096,13 +6138,13 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

", + "documentation":"

The pagination token that's used to fetch the next set of results.

", "location":"querystring", "locationName":"nextToken" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met.

", + "documentation":"

The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met.

", "location":"querystring", "locationName":"maxResults" } @@ -6114,7 +6156,7 @@ "members":{ "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

" + "documentation":"

The pagination token that's used to fetch the next set of results.

" }, "collaborationPrivacyBudgetTemplateSummaries":{ "shape":"CollaborationPrivacyBudgetTemplateSummaryList", @@ -6143,13 +6185,13 @@ }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met.

", + "documentation":"

The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met.

", "location":"querystring", "locationName":"maxResults" }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

", + "documentation":"

The pagination token that's used to fetch the next set of results.

", "location":"querystring", "locationName":"nextToken" } @@ -6165,7 +6207,7 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

" + "documentation":"

The pagination token that's used to fetch the next set of results.

" } } }, @@ -6174,13 +6216,13 @@ "members":{ "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

", + "documentation":"

The pagination token that's used to fetch the next set of results.

", "location":"querystring", "locationName":"nextToken" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met.

", + "documentation":"

The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met.

", "location":"querystring", "locationName":"maxResults" }, @@ -6198,7 +6240,7 @@ "members":{ "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

" + "documentation":"

The pagination token that's used to fetch the next set of results.

" }, "collaborationList":{ "shape":"CollaborationSummaryList", @@ -6218,13 +6260,13 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

", + "documentation":"

The pagination token that's used to fetch the next set of results.

", "location":"querystring", "locationName":"nextToken" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met.

", + "documentation":"

The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met.

", "location":"querystring", "locationName":"maxResults" } @@ -6256,13 +6298,13 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

", + "documentation":"

The pagination token that's used to fetch the next set of results.

", "location":"querystring", "locationName":"nextToken" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum size of the results that is returned per call.

", + "documentation":"

The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met.

", "location":"querystring", "locationName":"maxResults" } @@ -6278,7 +6320,7 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

" + "documentation":"

The pagination token that's used to fetch the next set of results.

" } } }, @@ -6287,13 +6329,13 @@ "members":{ "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

", + "documentation":"

The pagination token that's used to fetch the next set of results.

", "location":"querystring", "locationName":"nextToken" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum size of the results that is returned per call.

", + "documentation":"

The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met.

", "location":"querystring", "locationName":"maxResults" } @@ -6309,7 +6351,7 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

" + "documentation":"

The pagination token that's used to fetch the next set of results.

" } } }, @@ -6401,13 +6443,13 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

", + "documentation":"

The pagination token that's used to fetch the next set of results.

", "location":"querystring", "locationName":"nextToken" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum size of the results that is returned per call.

", + "documentation":"

The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met.

", "location":"querystring", "locationName":"maxResults" } @@ -6419,7 +6461,7 @@ "members":{ "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

" + "documentation":"

The pagination token that's used to fetch the next set of results.

" }, "memberSummaries":{ "shape":"MemberSummaryList", @@ -6432,13 +6474,13 @@ "members":{ "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

", + "documentation":"

The pagination token that's used to fetch the next set of results.

", "location":"querystring", "locationName":"nextToken" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum size of the results that is returned per call.

", + "documentation":"

The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met.

", "location":"querystring", "locationName":"maxResults" }, @@ -6456,7 +6498,7 @@ "members":{ "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

" + "documentation":"

The pagination token that's used to fetch the next set of results.

" }, "membershipSummaries":{ "shape":"MembershipSummaryList", @@ -6476,13 +6518,13 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

", + "documentation":"

The pagination token that's used to fetch the next set of results.

", "location":"querystring", "locationName":"nextToken" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met.

", + "documentation":"

The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met.

", "location":"querystring", "locationName":"maxResults" } @@ -6494,7 +6536,7 @@ "members":{ "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

" + "documentation":"

The pagination token that's used to fetch the next set of results.

" }, "privacyBudgetTemplateSummaries":{ "shape":"PrivacyBudgetTemplateSummaryList", @@ -6523,13 +6565,13 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

", + "documentation":"

The pagination token that's used to fetch the next set of results.

", "location":"querystring", "locationName":"nextToken" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met.

", + "documentation":"

The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met.

", "location":"querystring", "locationName":"maxResults" } @@ -6545,7 +6587,7 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

" + "documentation":"

The pagination token that's used to fetch the next set of results.

" } } }, @@ -6567,13 +6609,13 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

", + "documentation":"

The pagination token that's used to fetch the next set of results.

", "location":"querystring", "locationName":"nextToken" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service can return a nextToken even if the maximum results has not been met.

", + "documentation":"

The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met.

", "location":"querystring", "locationName":"maxResults" } @@ -6585,7 +6627,7 @@ "members":{ "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

" + "documentation":"

The pagination token that's used to fetch the next set of results.

" }, "protectedQueries":{ "shape":"ProtectedQuerySummaryList", @@ -6605,19 +6647,19 @@ }, "schemaType":{ "shape":"SchemaType", - "documentation":"

If present, filter schemas by schema type. The only valid schema type is currently `TABLE`.

", + "documentation":"

If present, filter schemas by schema type.

", "location":"querystring", "locationName":"schemaType" }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

", + "documentation":"

The pagination token that's used to fetch the next set of results.

", "location":"querystring", "locationName":"nextToken" }, "maxResults":{ "shape":"MaxResults", - "documentation":"

The maximum size of the results that is returned per call.

", + "documentation":"

The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met.

", "location":"querystring", "locationName":"maxResults" } @@ -6633,7 +6675,7 @@ }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

The token value retrieved from a previous call to access the next page of results.

" + "documentation":"

The pagination token that's used to fetch the next set of results.

" } } }, @@ -7031,7 +7073,20 @@ "TIMESTAMPTZ", "TIME", "TIMETZ", - "VARBYTE" + "VARBYTE", + "BINARY", + "BYTE", + "CHARACTER", + "DOUBLE", + "FLOAT", + "INT", + "LONG", + "NUMERIC", + "SHORT", + "STRING", + "TIMESTAMP_LTZ", + "TIMESTAMP_NTZ", + "TINYINT" ] }, "ParameterValue":{ @@ -7439,6 +7494,10 @@ "differentialPrivacy":{ "shape":"DifferentialPrivacyParameters", "documentation":"

The sensitivity parameters of the differential privacy results of the protected query.

" + }, + "computeConfiguration":{ + "shape":"ComputeConfiguration", + "documentation":"

The compute configuration for the protected query.

" } }, "documentation":"

The parameters for an Clean Rooms protected query.

" @@ -7563,6 +7622,10 @@ "keyPrefix":{ "shape":"KeyPrefix", "documentation":"

The S3 prefix to unload the protected query results.

" + }, + "singleFileOutput":{ + "shape":"Boolean", + "documentation":"

Indicates whether files should be output as a single file (TRUE) or output as multiple files (FALSE). This parameter is only supported for analyses with the Spark analytics engine.

" } }, "documentation":"

Contains the configuration to write the query results to S3.

" @@ -7614,6 +7677,10 @@ "totalDurationInMillis":{ "shape":"Long", "documentation":"

The duration of the protected query, from creation until query completion.

" + }, + "billedResourceUtilization":{ + "shape":"BilledResourceUtilization", + "documentation":"

The billed resource utilization.

" } }, "documentation":"

Contains statistics about the execution of the protected query.

" @@ -7659,7 +7726,7 @@ }, "status":{ "shape":"ProtectedQueryStatus", - "documentation":"

The status of the protected query. Value values are `SUBMITTED`, `STARTED`, `CANCELLED`, `CANCELLING`, `FAILED`, `SUCCESS`, `TIMED_OUT`.

" + "documentation":"

The status of the protected query.

" }, "receiverConfigurations":{ "shape":"ReceiverConfigurationsList", @@ -7852,7 +7919,7 @@ "members":{ "columns":{ "shape":"ColumnList", - "documentation":"

The columns for the relation this schema represents.

" + "documentation":"

The columns for the relation that this schema represents.

" }, "partitionKeys":{ "shape":"ColumnList", @@ -7860,11 +7927,11 @@ }, "analysisRuleTypes":{ "shape":"AnalysisRuleTypeList", - "documentation":"

The analysis rule types associated with the schema. Currently, only one entry is present.

" + "documentation":"

The analysis rule types that are associated with the schema. Currently, only one entry is present.

" }, "analysisMethod":{ "shape":"AnalysisMethod", - "documentation":"

The analysis method for the schema. The only valid value is currently DIRECT_QUERY.

" + "documentation":"

The analysis method for the schema. The only valid value is currently DIRECT_QUERY.

" }, "creatorAccountId":{ "shape":"AccountId", @@ -7880,7 +7947,7 @@ }, "collaborationArn":{ "shape":"CollaborationArn", - "documentation":"

The unique ARN for the collaboration that the schema belongs to.

" + "documentation":"

The unique Amazon Resource Name (ARN) for the collaboration that the schema belongs to.

" }, "description":{ "shape":"TableDescription", @@ -7888,15 +7955,15 @@ }, "createTime":{ "shape":"Timestamp", - "documentation":"

The time the schema was created.

" + "documentation":"

The time at which the schema was created.

" }, "updateTime":{ "shape":"Timestamp", - "documentation":"

The time the schema was last updated.

" + "documentation":"

The most recent time at which the schema was updated.

" }, "type":{ "shape":"SchemaType", - "documentation":"

The type of schema. The only valid value is currently `TABLE`.

" + "documentation":"

The type of schema.

" }, "schemaStatusDetails":{ "shape":"SchemaStatusDetailList", @@ -8051,7 +8118,7 @@ }, "type":{ "shape":"SchemaType", - "documentation":"

The type of schema object. The only valid schema type is currently `TABLE`.

" + "documentation":"

The type of schema object.

" }, "creatorAccountId":{ "shape":"AccountId", @@ -8156,6 +8223,10 @@ "resultConfiguration":{ "shape":"ProtectedQueryResultConfiguration", "documentation":"

The details needed to write the query results.

" + }, + "computeConfiguration":{ + "shape":"ComputeConfiguration", + "documentation":"

The compute configuration for the protected query.
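Putting the new pieces together, a hedged sketch of a protected query that sets explicit Spark worker sizing and the singleFileOutput flag described earlier (membership ID, bucket, and SQL text are placeholders):

import boto3

cleanrooms = boto3.client("cleanrooms")
response = cleanrooms.start_protected_query(
    membershipIdentifier="00000000-0000-0000-0000-000000000000",
    type="SQL",
    sqlParameters={"queryString": "SELECT COUNT(*) FROM example_table"},
    resultConfiguration={
        "outputConfiguration": {
            "s3": {
                "resultFormat": "CSV",
                "bucket": "example-results-bucket",
                "keyPrefix": "protected-queries/",
                "singleFileOutput": True,  # Spark analytics engine only
            }
        }
    },
    computeConfiguration={
        # WorkerComputeConfiguration: 2-400 workers of type CR.1X or CR.4X.
        "worker": {"type": "CR.1X", "number": 4}
    },
)
print(response["protectedQuery"]["id"])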

" } } }, @@ -8787,6 +8858,33 @@ "INVALID_QUERY", "IAM_SYNCHRONIZATION_DELAY" ] + }, + "WorkerComputeConfiguration":{ + "type":"structure", + "members":{ + "type":{ + "shape":"WorkerComputeType", + "documentation":"

The worker compute configuration type.

" + }, + "number":{ + "shape":"WorkerComputeConfigurationNumberInteger", + "documentation":"

The number of workers.

" + } + }, + "documentation":"

The configuration of the compute resources for workers running an analysis with the Clean Rooms SQL analytics engine.

" + }, + "WorkerComputeConfigurationNumberInteger":{ + "type":"integer", + "box":true, + "max":400, + "min":2 + }, + "WorkerComputeType":{ + "type":"string", + "enum":[ + "CR.1X", + "CR.4X" + ] } }, "documentation":"

Welcome to the Clean Rooms API Reference.

Clean Rooms is an Amazon Web Services service that helps multiple parties to join their data together in a secure collaboration workspace. In the collaboration, members who can query and receive results can get insights into the collective datasets without either party getting access to the other party's raw data.

To learn more about Clean Rooms concepts, procedures, and best practices, see the Clean Rooms User Guide.

To learn more about SQL commands, functions, and conditions supported in Clean Rooms, see the Clean Rooms SQL Reference.

" diff --git a/botocore/data/iotfleetwise/2021-06-17/service-2.json b/botocore/data/iotfleetwise/2021-06-17/service-2.json index c90f235b97..134d566b0d 100644 --- a/botocore/data/iotfleetwise/2021-06-17/service-2.json +++ b/botocore/data/iotfleetwise/2021-06-17/service-2.json @@ -58,6 +58,7 @@ "output":{"shape":"BatchUpdateVehicleResponse"}, "errors":[ {"shape":"InternalServerException"}, + {"shape":"LimitExceededException"}, {"shape":"ThrottlingException"}, {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} @@ -939,6 +940,7 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ConflictException"}, + {"shape":"LimitExceededException"}, {"shape":"ThrottlingException"}, {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} @@ -4942,6 +4944,7 @@ "createVehicleRequestItems":{ "type":"list", "member":{"shape":"CreateVehicleRequestItem"}, + "max":10, "min":1 }, "createVehicleResponses":{ @@ -5073,6 +5076,7 @@ "updateVehicleRequestItems":{ "type":"list", "member":{"shape":"UpdateVehicleRequestItem"}, + "max":10, "min":1 }, "updateVehicleResponseItems":{ diff --git a/botocore/data/logs/2014-03-28/service-2.json b/botocore/data/logs/2014-03-28/service-2.json index 4c0aebecd0..a7d5a36749 100644 --- a/botocore/data/logs/2014-03-28/service-2.json +++ b/botocore/data/logs/2014-03-28/service-2.json @@ -1474,6 +1474,7 @@ } } }, + "Baseline":{"type":"boolean"}, "Boolean":{"type":"boolean"}, "CancelExportTaskRequest":{ "type":"structure", @@ -5170,6 +5171,10 @@ "suppressionPeriod":{ "shape":"SuppressionPeriod", "documentation":"

If you are temporarily suppressing an anomaly or pattern, use this structure to specify how long the suppression is to last.

" + }, + "baseline":{ + "shape":"Baseline", + "documentation":"

Set this to true to prevent CloudWatch Logs from displaying this behavior as an anomaly in the future. The behavior is then treated as baseline behavior. However, if similar but more severe occurrences of this behavior occur in the future, those will still be reported as anomalies.

The default is false.
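A hedged sketch of passing the new flag (the detector ARN and anomaly ID are placeholders):

import boto3

logs = boto3.client("logs")
# Mark a previously reported anomaly as expected, baseline behavior.
logs.update_anomaly(
    anomalyDetectorArn="arn:aws:logs:us-east-1:111122223333:anomaly-detector:example",
    anomalyId="0123456789abcdef0123456789abcdef0123",
    baseline=True,
)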

" } } }, diff --git a/botocore/data/redshift-data/2019-12-20/paginators-1.json b/botocore/data/redshift-data/2019-12-20/paginators-1.json index b33a4ab753..ba46aa4b28 100644 --- a/botocore/data/redshift-data/2019-12-20/paginators-1.json +++ b/botocore/data/redshift-data/2019-12-20/paginators-1.json @@ -34,6 +34,11 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "Tables" + }, + "GetStatementResultV2": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "Records" } } } diff --git a/botocore/data/redshift-data/2019-12-20/service-2.json b/botocore/data/redshift-data/2019-12-20/service-2.json index 3c0818ad76..c4698660dc 100644 --- a/botocore/data/redshift-data/2019-12-20/service-2.json +++ b/botocore/data/redshift-data/2019-12-20/service-2.json @@ -11,7 +11,8 @@ "signatureVersion":"v4", "signingName":"redshift-data", "targetPrefix":"RedshiftData", - "uid":"redshift-data-2019-12-20" + "uid":"redshift-data-2019-12-20", + "auth":["aws.auth#sigv4"] }, "operations":{ "BatchExecuteStatement":{ @@ -108,7 +109,22 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Fetches the temporarily cached result of an SQL statement. A token is returned to page through the statement results.

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

" + "documentation":"

Fetches the temporarily cached result of an SQL statement in JSON format. The ExecuteStatement or BatchExecuteStatement operation that ran the SQL statement must have specified ResultFormat as JSON, or let the format default to JSON. A token is returned to page through the statement results.

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.

" + }, + "GetStatementResultV2":{ + "name":"GetStatementResultV2", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetStatementResultV2Request"}, + "output":{"shape":"GetStatementResultV2Response"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Fetches the temporarily cached result of an SQL statement in CSV format. The ExecuteStatement or BatchExecuteStatement operation that ran the SQL statement must have specified ResultFormat as CSV. A token is returned to page through the statement results.

For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.
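An end-to-end hedged sketch of the CSV flow, using the GetStatementResultV2 paginator that this change also registers (cluster, database, and SQL text are placeholders):

import boto3

rsd = boto3.client("redshift-data")
# Ask for CSV-formatted results up front; otherwise the format defaults to JSON.
statement = rsd.execute_statement(
    ClusterIdentifier="example-cluster",
    Database="dev",
    DbUser="awsuser",
    Sql="SELECT * FROM sales LIMIT 100",
    ResultFormat="CSV",
)

# In practice, poll describe_statement until the status is FINISHED before
# fetching results; that wait is omitted here for brevity.
paginator = rsd.get_paginator("get_statement_result_v2")
for page in paginator.paginate(Id=statement["Id"]):
    for record in page["Records"]:
        print(record["CSVRecords"])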

" }, "ListDatabases":{ "name":"ListDatabases", @@ -228,6 +244,10 @@ "shape":"String", "documentation":"

The database user name. This parameter is required when connecting to a cluster as a database user and authenticating using temporary credentials.

" }, + "ResultFormat":{ + "shape":"ResultFormatString", + "documentation":"

The data format of the result of the SQL statement. If no format is specified, the default is JSON.

" + }, "SecretArn":{ "shape":"SecretArn", "documentation":"

The name or ARN of the secret that enables access to the database. This parameter is required when authenticating using Secrets Manager.

" @@ -343,8 +363,7 @@ "ClusterIdentifierString":{ "type":"string", "max":63, - "min":1, - "pattern":"^[a-z]([a-z0-9]|-[a-z0-9])*$" + "min":1 }, "ColumnList":{ "type":"list", @@ -492,6 +511,10 @@ "shape":"Long", "documentation":"

The identifier of the query generated by Amazon Redshift. These identifiers are also available in the query column of the STL_QUERY system view.

" }, + "ResultFormat":{ + "shape":"ResultFormatString", + "documentation":"

The data format of the result of the SQL statement.

" + }, "ResultRows":{ "shape":"Long", "documentation":"

Either the number of rows returned from the SQL statement or the number of rows affected. If result size is greater than zero, the result rows can be the number of rows affected by SQL statements such as INSERT, UPDATE, DELETE, COPY, and others. A -1 indicates the value is null.

" @@ -634,6 +657,10 @@ "shape":"SqlParametersList", "documentation":"

The parameters for the SQL statement.

" }, + "ResultFormat":{ + "shape":"ResultFormatString", + "documentation":"

The data format of the result of the SQL statement. If no format is specified, the default is JSON.

" + }, "SecretArn":{ "shape":"SecretArn", "documentation":"

The name or ARN of the secret that enables access to the database. This parameter is required when authenticating using Secrets Manager.

" @@ -740,6 +767,10 @@ "type":"list", "member":{"shape":"Field"} }, + "FormattedSqlRecords":{ + "type":"list", + "member":{"shape":"QueryRecords"} + }, "GetStatementResultRequest":{ "type":"structure", "required":["Id"], @@ -768,7 +799,7 @@ }, "Records":{ "shape":"SqlRecords", - "documentation":"

The results of the SQL statement.

" + "documentation":"

The results of the SQL statement in JSON format.

" }, "TotalNumRows":{ "shape":"Long", @@ -776,6 +807,46 @@ } } }, + "GetStatementResultV2Request":{ + "type":"structure", + "required":["Id"], + "members":{ + "Id":{ + "shape":"UUID", + "documentation":"

The identifier of the SQL statement whose results are to be fetched. This value is a universally unique identifier (UUID) generated by Amazon Redshift Data API. A suffix indicates the number of the SQL statement. For example, d9b6c0c9-0747-4bf4-b142-e8883122f766:2 has a suffix of :2 that indicates the second SQL statement of a batch query. This identifier is returned by BatchExecuteStatement, ExecuteStatement, and ListStatements.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing the returned NextToken value in the NextToken parameter of the next request. If the NextToken field is empty, all response records have been retrieved for the request.

" + } + } + }, + "GetStatementResultV2Response":{ + "type":"structure", + "required":["Records"], + "members":{ + "ColumnMetadata":{ + "shape":"ColumnMetadataList", + "documentation":"

The properties (metadata) of a column.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing the returned NextToken value in the NextToken parameter of the next request. If the NextToken field is empty, all response records have been retrieved for the request.

" + }, + "Records":{ + "shape":"FormattedSqlRecords", + "documentation":"

The results of the SQL statement in CSV format.

" + }, + "ResultFormat":{ + "shape":"ResultFormatString", + "documentation":"

The data format of the result of the SQL statement.

" + }, + "TotalNumRows":{ + "shape":"Long", + "documentation":"

The total number of rows in the result set returned from a query. You can use this number to estimate the number of calls to the GetStatementResultV2 operation needed to page through the results.
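This patch also registers a paginator for the new operation (see the paginators file earlier in this change), so the paging can be left to botocore. The operation name below assumes the usual snake_case mapping, and the statement identifier is a placeholder:

import boto3

client = boto3.client("redshift-data")
paginator = client.get_paginator("get_statement_result_v2")

pages = 0
total_rows = None
for page in paginator.paginate(Id="d9b6c0c9-0747-4bf4-b142-e8883122f766"):  # placeholder Id
    pages += 1
    total_rows = page["TotalNumRows"]

print(f"{total_rows} rows returned across {pages} page(s)")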

" + } + } + }, "Integer":{"type":"integer"}, "InternalServerException":{ "type":"structure", @@ -1009,6 +1080,17 @@ "type":"string", "min":1 }, + "QueryRecords":{ + "type":"structure", + "members":{ + "CSVRecords":{ + "shape":"String", + "documentation":"

The results of the SQL statement in CSV format.

" + } + }, + "documentation":"

The results of the SQL statement.
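Because each QueryRecords entry carries a block of CSV text rather than typed fields, callers will typically join the chunks and hand them to a CSV parser. A small helper sketch follows; it assumes the chunks are newline-terminated, takes column names from ColumnMetadata, and does not assume whether the CSV text itself repeats a header row:

import csv
import io

def rows_from_v2_pages(pages):
    """Join CSVRecords chunks from GetStatementResultV2 pages and parse them."""
    chunks, columns = [], None
    for page in pages:
        if columns is None:
            # Column names arrive with the metadata on the first page.
            columns = [col["name"] for col in page.get("ColumnMetadata", [])]
        chunks.extend(record["CSVRecords"] for record in page["Records"])
    reader = csv.reader(io.StringIO("".join(chunks)))
    return columns, list(reader)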

", + "union":true + }, "QueryTimeoutException":{ "type":"structure", "members":{ @@ -1036,6 +1118,13 @@ "documentation":"

The Amazon Redshift Data API operation failed due to a missing resource.

", "exception":true }, + "ResultFormatString":{ + "type":"string", + "enum":[ + "JSON", + "CSV" + ] + }, "SchemaList":{ "type":"list", "member":{"shape":"String"} @@ -1108,6 +1197,10 @@ "shape":"StatementStringList", "documentation":"

One or more SQL statements. Each query string in the array corresponds to one of the queries in a batch query request.

" }, + "ResultFormat":{ + "shape":"ResultFormatString", + "documentation":"

The data format of the result of the SQL statement.

" + }, "SecretArn":{ "shape":"SecretArn", "documentation":"

The name or Amazon Resource Name (ARN) of the secret that enables access to the database.

" diff --git a/botocore/data/sagemaker/2017-07-24/service-2.json b/botocore/data/sagemaker/2017-07-24/service-2.json index 6586ab6ddc..dac3cca2e0 100644 --- a/botocore/data/sagemaker/2017-07-24/service-2.json +++ b/botocore/data/sagemaker/2017-07-24/service-2.json @@ -4637,10 +4637,10 @@ "members":{ "AnnotationConsolidationLambdaArn":{ "shape":"LambdaFunctionArn", - "documentation":"

The Amazon Resource Name (ARN) of a Lambda function that implements the logic for annotation consolidation and processes output data.

This parameter is required for all labeling jobs. For built-in task types, use one of the following Amazon SageMaker Ground Truth Lambda function ARNs for AnnotationConsolidationLambdaArn. For custom labeling workflows, see Post-annotation Lambda.

Bounding box - Finds the most similar boxes from different workers based on the Jaccard index of the boxes.

Image classification - Uses a variant of the Expectation Maximization approach to estimate the true class of an image based on annotations from individual workers.

Multi-label image classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of an image based on annotations from individual workers.

Semantic segmentation - Treats each pixel in an image as a multi-class classification and treats pixel annotations from workers as \"votes\" for the correct label.

Text classification - Uses a variant of the Expectation Maximization approach to estimate the true class of text based on annotations from individual workers.

Multi-label text classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of text based on annotations from individual workers.

Named entity recognition - Groups similar selections and calculates aggregate boundaries, resolving to the most-assigned label.

Video Classification - Use this task type when you need workers to classify videos using predefined labels that you specify. Workers are shown videos and are asked to choose one label for each video.

Video Frame Object Detection - Use this task type to have workers identify and locate objects in a sequence of video frames (images extracted from a video) using bounding boxes. For example, you can use this task to ask workers to identify and localize various objects in a series of video frames, such as cars, bikes, and pedestrians.

Video Frame Object Tracking - Use this task type to have workers track the movement of objects in a sequence of video frames (images extracted from a video) using bounding boxes. For example, you can use this task to ask workers to track the movement of objects, such as cars, bikes, and pedestrians.

3D Point Cloud Object Detection - Use this task type when you want workers to classify objects in a 3D point cloud by drawing 3D cuboids around objects. For example, you can use this task type to ask workers to identify different types of objects in a point cloud, such as cars, bikes, and pedestrians.

3D Point Cloud Object Tracking - Use this task type when you want workers to draw 3D cuboids around objects that appear in a sequence of 3D point cloud frames. For example, you can use this task type to ask workers to track the movement of vehicles across multiple point cloud frames.

3D Point Cloud Semantic Segmentation - Use this task type when you want workers to create point-level semantic segmentation masks by painting objects in a 3D point cloud using different colors, where each color is assigned to one of the classes you specify.

Use the following ARNs for Label Verification and Adjustment Jobs

Use label verification and adjustment jobs to review and adjust labels. To learn more, see Verify and Adjust Labels .

Semantic Segmentation Adjustment - Treats each pixel in an image as a multi-class classification and treats pixel adjusted annotations from workers as \"votes\" for the correct label.

Semantic Segmentation Verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgment for semantic segmentation labels based on annotations from individual workers.

Bounding Box Adjustment - Finds the most similar boxes from different workers based on the Jaccard index of the adjusted annotations.

Bounding Box Verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgment for bounding box labels based on annotations from individual workers.

Video Frame Object Detection Adjustment - Use this task type when you want workers to adjust bounding boxes that workers have added to video frames to classify and localize objects in a sequence of video frames.

Video Frame Object Tracking Adjustment - Use this task type when you want workers to adjust bounding boxes that workers have added to video frames to track object movement across a sequence of video frames.

3D Point Cloud Object Detection Adjustment - Use this task type when you want workers to adjust 3D cuboids around objects in a 3D point cloud.

3D Point Cloud Object Tracking Adjustment - Use this task type when you want workers to adjust 3D cuboids around objects that appear in a sequence of 3D point cloud frames.

3D Point Cloud Semantic Segmentation Adjustment - Use this task type when you want workers to adjust point-level semantic segmentation masks using a paint tool.

" + "documentation":"

The Amazon Resource Name (ARN) of a Lambda function that implements the logic for annotation consolidation and processes output data.

For built-in task types, use one of the following Amazon SageMaker Ground Truth Lambda function ARNs for AnnotationConsolidationLambdaArn. For custom labeling workflows, see Post-annotation Lambda.

Bounding box - Finds the most similar boxes from different workers based on the Jaccard index of the boxes.

Image classification - Uses a variant of the Expectation Maximization approach to estimate the true class of an image based on annotations from individual workers.

Multi-label image classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of an image based on annotations from individual workers.

Semantic segmentation - Treats each pixel in an image as a multi-class classification and treats pixel annotations from workers as \"votes\" for the correct label.

Text classification - Uses a variant of the Expectation Maximization approach to estimate the true class of text based on annotations from individual workers.

Multi-label text classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of text based on annotations from individual workers.

Named entity recognition - Groups similar selections and calculates aggregate boundaries, resolving to the most-assigned label.

Video Classification - Use this task type when you need workers to classify videos using predefined labels that you specify. Workers are shown videos and are asked to choose one label for each video.

Video Frame Object Detection - Use this task type to have workers identify and locate objects in a sequence of video frames (images extracted from a video) using bounding boxes. For example, you can use this task to ask workers to identify and localize various objects in a series of video frames, such as cars, bikes, and pedestrians.

Video Frame Object Tracking - Use this task type to have workers track the movement of objects in a sequence of video frames (images extracted from a video) using bounding boxes. For example, you can use this task to ask workers to track the movement of objects, such as cars, bikes, and pedestrians.

3D Point Cloud Object Detection - Use this task type when you want workers to classify objects in a 3D point cloud by drawing 3D cuboids around objects. For example, you can use this task type to ask workers to identify different types of objects in a point cloud, such as cars, bikes, and pedestrians.

3D Point Cloud Object Tracking - Use this task type when you want workers to draw 3D cuboids around objects that appear in a sequence of 3D point cloud frames. For example, you can use this task type to ask workers to track the movement of vehicles across multiple point cloud frames.

3D Point Cloud Semantic Segmentation - Use this task type when you want workers to create point-level semantic segmentation masks by painting objects in a 3D point cloud using different colors, where each color is assigned to one of the classes you specify.

Use the following ARNs for Label Verification and Adjustment Jobs

Use label verification and adjustment jobs to review and adjust labels. To learn more, see Verify and Adjust Labels .

Semantic Segmentation Adjustment - Treats each pixel in an image as a multi-class classification and treats pixel adjusted annotations from workers as \"votes\" for the correct label.

Semantic Segmentation Verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgment for semantic segmentation labels based on annotations from individual workers.

Bounding Box Adjustment - Finds the most similar boxes from different workers based on the Jaccard index of the adjusted annotations.

Bounding Box Verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgment for bounding box labels based on annotations from individual workers.

Video Frame Object Detection Adjustment - Use this task type when you want workers to adjust bounding boxes that workers have added to video frames to classify and localize objects in a sequence of video frames.

Video Frame Object Tracking Adjustment - Use this task type when you want workers to adjust bounding boxes that workers have added to video frames to track object movement across a sequence of video frames.

3D Point Cloud Object Detection Adjustment - Use this task type when you want workers to adjust 3D cuboids around objects in a 3D point cloud.

3D Point Cloud Object Tracking Adjustment - Use this task type when you want workers to adjust 3D cuboids around objects that appear in a sequence of 3D point cloud frames.

3D Point Cloud Semantic Segmentation Adjustment - Use this task type when you want workers to adjust point-level semantic segmentation masks using a paint tool.

" } }, - "documentation":"

Configures how labels are consolidated across human workers and processes output data.

" + "documentation":"

Configures how labels are consolidated across human workers and processes output data.

" }, "AppArn":{ "type":"string", @@ -8967,7 +8967,7 @@ }, "DefaultSpaceSettings":{ "shape":"DefaultSpaceSettings", - "documentation":"

The default settings used to create a space.

" + "documentation":"

The default settings for shared spaces that users create in the domain.

" } } }, @@ -10354,7 +10354,7 @@ }, "AcceleratorTypes":{ "shape":"NotebookInstanceAcceleratorTypes", - "documentation":"

A list of Elastic Inference (EI) instance types to associate with this notebook instance. Currently, only one instance type can be associated with a notebook instance. For more information, see Using Elastic Inference in Amazon SageMaker.

" + "documentation":"

This parameter is no longer supported. Elastic Inference (EI) is no longer available.

This parameter was used to specify a list of EI instance types to associate with this notebook instance.

" }, "DefaultCodeRepository":{ "shape":"CodeRepositoryNameOrUrl", @@ -11739,7 +11739,7 @@ "documentation":"

The settings for assigning a custom file system to a domain. Permitted users can access this file system in Amazon SageMaker Studio.

" } }, - "documentation":"

A collection of settings that apply to spaces created in the domain.

" + "documentation":"

The default settings for shared spaces that users create in the domain.

SageMaker applies these settings only to shared spaces. It doesn't apply them to private spaces.

" }, "DefaultSpaceStorageSettings":{ "type":"structure", @@ -13780,7 +13780,7 @@ }, "DefaultSpaceSettings":{ "shape":"DefaultSpaceSettings", - "documentation":"

The default settings used to create a space.

" + "documentation":"

The default settings for shared spaces that users create in the domain.

" } } }, @@ -15968,7 +15968,7 @@ }, "AcceleratorTypes":{ "shape":"NotebookInstanceAcceleratorTypes", - "documentation":"

A list of the Elastic Inference (EI) instance types associated with this notebook instance. Currently only one EI instance type can be associated with a notebook instance. For more information, see Using Elastic Inference in Amazon SageMaker.

" + "documentation":"

This parameter is no longer supported. Elastic Inference (EI) is no longer available.

This parameter was used to specify a list of the EI instance types associated with this notebook instance.

" }, "DefaultCodeRepository":{ "shape":"CodeRepositoryNameOrUrl", @@ -30547,7 +30547,7 @@ }, "CompilerOptions":{ "shape":"CompilerOptions", - "documentation":"

Specifies additional parameters for compiler options in JSON format. The compiler options are TargetPlatform specific. It is required for NVIDIA accelerators and highly recommended for CPU compilations. For any other cases, it is optional to specify CompilerOptions.

" + "documentation":"

Specifies additional parameters for compiler options in JSON format. The compiler options are TargetPlatform specific. It is required for NVIDIA accelerators and highly recommended for CPU compilations. For any other cases, it is optional to specify CompilerOptions.

" }, "KmsKeyId":{ "shape":"KmsKeyId", @@ -30821,7 +30821,7 @@ }, "AcceleratorType":{ "shape":"ProductionVariantAcceleratorType", - "documentation":"

The size of the Elastic Inference (EI) instance to use for the production variant. EI instances provide on-demand GPU computing for inference. For more information, see Using Elastic Inference in Amazon SageMaker.

" + "documentation":"

This parameter is no longer supported. Elastic Inference (EI) is no longer available.

This parameter was used to specify the size of the EI instance to use for the production variant.

" }, "VariantStatus":{ "shape":"ProductionVariantStatusList", @@ -31302,7 +31302,7 @@ "PlatformIdentifier":{ "type":"string", "max":15, - "pattern":"^(notebook-al1-v1|notebook-al2-v1|notebook-al2-v2)$" + "pattern":"^(notebook-al1-v1|notebook-al2-v1|notebook-al2-v2|notebook-al2-v3)$" }, "PolicyString":{ "type":"string", @@ -31852,7 +31852,7 @@ }, "AcceleratorType":{ "shape":"ProductionVariantAcceleratorType", - "documentation":"

The size of the Elastic Inference (EI) instance to use for the production variant. EI instances provide on-demand GPU computing for inference. For more information, see Using Elastic Inference in Amazon SageMaker.

" + "documentation":"

This parameter is no longer supported. Elastic Inference (EI) is no longer available.

This parameter was used to specify the size of the EI instance to use for the production variant.

" }, "CoreDumpConfig":{ "shape":"ProductionVariantCoreDumpConfig", @@ -36466,6 +36466,8 @@ "ml.p4d.24xlarge", "ml.p4de.24xlarge", "ml.p5.48xlarge", + "ml.p5e.48xlarge", + "ml.p5en.48xlarge", "ml.c5.xlarge", "ml.c5.2xlarge", "ml.c5.4xlarge", @@ -36487,6 +36489,7 @@ "ml.trn1.2xlarge", "ml.trn1.32xlarge", "ml.trn1n.32xlarge", + "ml.trn2.48xlarge", "ml.m6i.large", "ml.m6i.xlarge", "ml.m6i.2xlarge", @@ -38149,7 +38152,7 @@ }, "DefaultSpaceSettings":{ "shape":"DefaultSpaceSettings", - "documentation":"

The default settings used to create a space within the domain.

" + "documentation":"

The default settings for shared spaces that users create in the domain.

" }, "SubnetIds":{ "shape":"Subnets", @@ -38775,11 +38778,11 @@ }, "AcceleratorTypes":{ "shape":"NotebookInstanceAcceleratorTypes", - "documentation":"

A list of the Elastic Inference (EI) instance types to associate with this notebook instance. Currently only one EI instance type can be associated with a notebook instance. For more information, see Using Elastic Inference in Amazon SageMaker.

" + "documentation":"

This parameter is no longer supported. Elastic Inference (EI) is no longer available.

This parameter was used to specify a list of the EI instance types to associate with this notebook instance.

" }, "DisassociateAcceleratorTypes":{ "shape":"DisassociateNotebookInstanceAcceleratorTypes", - "documentation":"

A list of the Elastic Inference (EI) instance types to remove from this notebook instance. This operation is idempotent. If you specify an accelerator type that is not associated with the notebook instance when you call this method, it does not throw an error.

" + "documentation":"

This parameter is no longer supported. Elastic Inference (EI) is no longer available.

This parameter was used to specify a list of the EI instance types to remove from this notebook instance.

" }, "DisassociateDefaultCodeRepository":{ "shape":"DisassociateDefaultCodeRepository", @@ -39274,11 +39277,11 @@ "members":{ "ExecutionRole":{ "shape":"RoleArn", - "documentation":"

The execution role for the user.

" + "documentation":"

The execution role for the user.

SageMaker applies this setting only to private spaces that the user creates in the domain. SageMaker doesn't apply this setting to shared spaces.

" }, "SecurityGroups":{ "shape":"SecurityGroupIds", - "documentation":"

The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication.

Optional when the CreateDomain.AppNetworkAccessType parameter is set to PublicInternetOnly.

Required when the CreateDomain.AppNetworkAccessType parameter is set to VpcOnly, unless specified as part of the DefaultUserSettings for the domain.

Amazon SageMaker adds a security group to allow NFS traffic from Amazon SageMaker Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown.

" + "documentation":"

The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication.

Optional when the CreateDomain.AppNetworkAccessType parameter is set to PublicInternetOnly.

Required when the CreateDomain.AppNetworkAccessType parameter is set to VpcOnly, unless specified as part of the DefaultUserSettings for the domain.

Amazon SageMaker adds a security group to allow NFS traffic from Amazon SageMaker Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown.

SageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.

" }, "SharingSettings":{ "shape":"SharingSettings", @@ -39306,19 +39309,19 @@ }, "CanvasAppSettings":{ "shape":"CanvasAppSettings", - "documentation":"

The Canvas app settings.

" + "documentation":"

The Canvas app settings.

SageMaker applies these settings only to private spaces that SageMaker creates for the Canvas app.

" }, "CodeEditorAppSettings":{ "shape":"CodeEditorAppSettings", - "documentation":"

The Code Editor application settings.

" + "documentation":"

The Code Editor application settings.

SageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.

" }, "JupyterLabAppSettings":{ "shape":"JupyterLabAppSettings", - "documentation":"

The settings for the JupyterLab application.

" + "documentation":"

The settings for the JupyterLab application.

SageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.

" }, "SpaceStorageSettings":{ "shape":"DefaultSpaceStorageSettings", - "documentation":"

The storage settings for a space.

" + "documentation":"

The storage settings for a space.

SageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.

" }, "DefaultLandingUri":{ "shape":"LandingUri", @@ -39330,11 +39333,11 @@ }, "CustomPosixUserConfig":{ "shape":"CustomPosixUserConfig", - "documentation":"

Details about the POSIX identity that is used for file system operations.

" + "documentation":"

Details about the POSIX identity that is used for file system operations.

SageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.

" }, "CustomFileSystemConfigs":{ "shape":"CustomFileSystemConfigs", - "documentation":"

The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker Studio.

" + "documentation":"

The settings for assigning a custom file system to a user profile. Permitted users can access this file system in Amazon SageMaker Studio.

SageMaker applies these settings only to private spaces that the user creates in the domain. SageMaker doesn't apply these settings to shared spaces.

" }, "StudioWebPortalSettings":{ "shape":"StudioWebPortalSettings", @@ -39342,7 +39345,7 @@ }, "AutoMountHomeEFS":{ "shape":"AutoMountHomeEFS", - "documentation":"

Indicates whether auto-mounting of an EFS volume is supported for the user profile. The DefaultAsDomain value is only supported for user profiles. Do not use the DefaultAsDomain value when setting this parameter for a domain.

" + "documentation":"

Indicates whether auto-mounting of an EFS volume is supported for the user profile. The DefaultAsDomain value is only supported for user profiles. Do not use the DefaultAsDomain value when setting this parameter for a domain.

SageMaker applies this setting only to private spaces that the user creates in the domain. SageMaker doesn't apply this setting to shared spaces.

" } }, "documentation":"

A collection of settings that apply to users in a domain. These settings are specified when the CreateUserProfile API is called, and as DefaultUserSettings when the CreateDomain API is called.

SecurityGroups is aggregated when specified in both calls. For all other settings in UserSettings, the values specified in CreateUserProfile take precedence over those specified in CreateDomain.

" From 77cb136eb947303e2f607050b5e5e8b26ced444e Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Tue, 29 Oct 2024 18:12:44 +0000 Subject: [PATCH 3/4] Update endpoints model --- botocore/data/endpoints.json | 1 + 1 file changed, 1 insertion(+) diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 5a517bed70..402bb50da5 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -20929,6 +20929,7 @@ "tags" : [ "dualstack" ] } ] }, + "eu-south-2" : { }, "eu-west-1" : { "variants" : [ { "hostname" : "textract.eu-west-1.api.aws", From 1e1d00d8e4646a590bac7f6493d407595a069289 Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Tue, 29 Oct 2024 18:14:13 +0000 Subject: [PATCH 4/4] Bumping version to 1.35.51 --- .changes/1.35.51.json | 37 +++++++++++++++++++ .../api-change-bedrock-56802.json | 5 --- .../api-change-bedrockruntime-5342.json | 5 --- .../api-change-cleanrooms-55004.json | 5 --- .../api-change-iotfleetwise-54553.json | 5 --- .../next-release/api-change-logs-88718.json | 5 --- .../api-change-redshiftdata-42855.json | 5 --- .../api-change-sagemaker-80215.json | 5 --- CHANGELOG.rst | 12 ++++++ botocore/__init__.py | 2 +- docs/source/conf.py | 2 +- 11 files changed, 51 insertions(+), 37 deletions(-) create mode 100644 .changes/1.35.51.json delete mode 100644 .changes/next-release/api-change-bedrock-56802.json delete mode 100644 .changes/next-release/api-change-bedrockruntime-5342.json delete mode 100644 .changes/next-release/api-change-cleanrooms-55004.json delete mode 100644 .changes/next-release/api-change-iotfleetwise-54553.json delete mode 100644 .changes/next-release/api-change-logs-88718.json delete mode 100644 .changes/next-release/api-change-redshiftdata-42855.json delete mode 100644 .changes/next-release/api-change-sagemaker-80215.json diff --git a/.changes/1.35.51.json b/.changes/1.35.51.json new file mode 100644 index 0000000000..2a2ef4e1a6 --- /dev/null +++ b/.changes/1.35.51.json @@ -0,0 +1,37 @@ +[ + { + "category": "``bedrock``", + "description": "Update Application Inference Profile", + "type": "api-change" + }, + { + "category": "``bedrock-runtime``", + "description": "Update Application Inference Profile", + "type": "api-change" + }, + { + "category": "``cleanrooms``", + "description": "This release adds the option for customers to configure analytics engine when creating a collaboration, and introduces the new SPARK analytics engine type in addition to maintaining the legacy CLEAN_ROOMS_SQL engine type.", + "type": "api-change" + }, + { + "category": "``iotfleetwise``", + "description": "Updated BatchCreateVehicle and BatchUpdateVehicle APIs: LimitExceededException has been added and the maximum number of vehicles in a batch has been set to 10 explicitly", + "type": "api-change" + }, + { + "category": "``logs``", + "description": "Added support for new optional baseline parameter in the UpdateAnomaly API. For UpdateAnomaly requests with baseline set to True, The anomaly behavior is then treated as baseline behavior. 
However, more severe occurrences of this behavior will still be reported as anomalies.", + "type": "api-change" + }, + { + "category": "``redshift-data``", + "description": "Adding a new API GetStatementResultV2 that supports CSV formatted results from ExecuteStatement and BatchExecuteStatement calls.", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "Adding `notebook-al2-v3` as allowed value to SageMaker NotebookInstance PlatformIdentifier attribute", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/next-release/api-change-bedrock-56802.json b/.changes/next-release/api-change-bedrock-56802.json deleted file mode 100644 index 63fd970317..0000000000 --- a/.changes/next-release/api-change-bedrock-56802.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``bedrock``", - "description": "Update Application Inference Profile" -} diff --git a/.changes/next-release/api-change-bedrockruntime-5342.json b/.changes/next-release/api-change-bedrockruntime-5342.json deleted file mode 100644 index c2283d421f..0000000000 --- a/.changes/next-release/api-change-bedrockruntime-5342.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``bedrock-runtime``", - "description": "Update Application Inference Profile" -} diff --git a/.changes/next-release/api-change-cleanrooms-55004.json b/.changes/next-release/api-change-cleanrooms-55004.json deleted file mode 100644 index fa2f734a81..0000000000 --- a/.changes/next-release/api-change-cleanrooms-55004.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``cleanrooms``", - "description": "This release adds the option for customers to configure analytics engine when creating a collaboration, and introduces the new SPARK analytics engine type in addition to maintaining the legacy CLEAN_ROOMS_SQL engine type." -} diff --git a/.changes/next-release/api-change-iotfleetwise-54553.json b/.changes/next-release/api-change-iotfleetwise-54553.json deleted file mode 100644 index e800629749..0000000000 --- a/.changes/next-release/api-change-iotfleetwise-54553.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``iotfleetwise``", - "description": "Updated BatchCreateVehicle and BatchUpdateVehicle APIs: LimitExceededException has been added and the maximum number of vehicles in a batch has been set to 10 explicitly" -} diff --git a/.changes/next-release/api-change-logs-88718.json b/.changes/next-release/api-change-logs-88718.json deleted file mode 100644 index e9a4fbf78e..0000000000 --- a/.changes/next-release/api-change-logs-88718.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``logs``", - "description": "Added support for new optional baseline parameter in the UpdateAnomaly API. For UpdateAnomaly requests with baseline set to True, The anomaly behavior is then treated as baseline behavior. However, more severe occurrences of this behavior will still be reported as anomalies." -} diff --git a/.changes/next-release/api-change-redshiftdata-42855.json b/.changes/next-release/api-change-redshiftdata-42855.json deleted file mode 100644 index 8bc1bdf230..0000000000 --- a/.changes/next-release/api-change-redshiftdata-42855.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``redshift-data``", - "description": "Adding a new API GetStatementResultV2 that supports CSV formatted results from ExecuteStatement and BatchExecuteStatement calls." 
-} diff --git a/.changes/next-release/api-change-sagemaker-80215.json b/.changes/next-release/api-change-sagemaker-80215.json deleted file mode 100644 index 2eb310f368..0000000000 --- a/.changes/next-release/api-change-sagemaker-80215.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``sagemaker``", - "description": "Adding `notebook-al2-v3` as allowed value to SageMaker NotebookInstance PlatformIdentifier attribute" -} diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 91a7c4d228..908484da99 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,18 @@ CHANGELOG ========= +1.35.51 +======= + +* api-change:``bedrock``: Update Application Inference Profile +* api-change:``bedrock-runtime``: Update Application Inference Profile +* api-change:``cleanrooms``: This release adds the option for customers to configure analytics engine when creating a collaboration, and introduces the new SPARK analytics engine type in addition to maintaining the legacy CLEAN_ROOMS_SQL engine type. +* api-change:``iotfleetwise``: Updated BatchCreateVehicle and BatchUpdateVehicle APIs: LimitExceededException has been added and the maximum number of vehicles in a batch has been set to 10 explicitly +* api-change:``logs``: Added support for new optional baseline parameter in the UpdateAnomaly API. For UpdateAnomaly requests with baseline set to True, The anomaly behavior is then treated as baseline behavior. However, more severe occurrences of this behavior will still be reported as anomalies. +* api-change:``redshift-data``: Adding a new API GetStatementResultV2 that supports CSV formatted results from ExecuteStatement and BatchExecuteStatement calls. +* api-change:``sagemaker``: Adding `notebook-al2-v3` as allowed value to SageMaker NotebookInstance PlatformIdentifier attribute + + 1.35.50 ======= diff --git a/botocore/__init__.py b/botocore/__init__.py index 9f3ab4ebc9..c33143e68d 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.35.50' +__version__ = '1.35.51' class NullHandler(logging.Handler): diff --git a/docs/source/conf.py b/docs/source/conf.py index f238d3eaf1..aa13184091 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -59,7 +59,7 @@ # The short X.Y version. version = '1.35.' # The full version, including alpha/beta/rc tags. -release = '1.35.50' +release = '1.35.51' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages.